Mirror of https://github.com/karl0ss/bazarr-ai-sub-generator.git (synced 2025-10-31 14:44:13 +00:00)

commit cf3e4acc43
parent 830d2a3157

    docstrings
@@ -2,6 +2,7 @@ import os
 import warnings
 import tempfile
 import time
+from typing import List, Dict, Any
 from utils.files import filename, write_srt
 from utils.ffmpeg import get_audio, add_subtitles_to_mp4
 from utils.bazarr import get_wanted_episodes, get_episode_details, sync_series
@@ -11,7 +12,18 @@ from utils.whisper import WhisperAI
 from utils.decorator import measure_time
 
 
-def process_audio_and_subtitles(file_path, model_args, args, backend):
+def process_audio_and_subtitles(file_path: str, model_args: Dict[str, Any], args: Dict[str, Any], backend: str) -> None:
+    """Processes audio extraction and subtitle generation for a given file.
+
+    Args:
+        file_path (str): Path to the video file.
+        model_args (Dict[str, Any]): Model arguments for subtitle generation.
+        args (Dict[str, Any]): Additional arguments for subtitle generation.
+        backend (str): Backend to use ('whisper' or 'faster_whisper').
+
+    Returns:
+        None
+    """
     try:
         audios = get_audio([file_path], 0, None)
         subtitles = get_subtitles(audios, tempfile.gettempdir(), model_args, args, backend)
@@ -21,7 +33,18 @@ def process_audio_and_subtitles(file_path, model_args, args, backend):
         print(f"Skipping file {file_path} due to - {ex}")
 
 
-def folder_flow(folder, model_args, args, backend):
+def folder_flow(folder: str, model_args: Dict[str, Any], args: Dict[str, Any], backend: str) -> None:
+    """Processes all files within a specified folder.
+
+    Args:
+        folder (str): Path to the folder containing video files.
+        model_args (Dict[str, Any]): Model arguments for subtitle generation.
+        args (Dict[str, Any]): Additional arguments for subtitle generation.
+        backend (str): Backend to use ('whisper' or 'faster_whisper').
+
+    Returns:
+        None
+    """
     print(f"Processing folder {folder}")
     files = os.listdir(folder)
     for file in files:
@@ -30,12 +53,34 @@ def folder_flow(folder, model_args, args, backend):
         process_audio_and_subtitles(path, model_args, args, backend)
 
 
-def file_flow(file_path, model_args, args, backend):
+def file_flow(file_path: str, model_args: Dict[str, Any], args: Dict[str, Any], backend: str) -> None:
+    """Processes a single specified file.
+
+    Args:
+        file_path (str): Path to the video file.
+        model_args (Dict[str, Any]): Model arguments for subtitle generation.
+        args (Dict[str, Any]): Additional arguments for subtitle generation.
+        backend (str): Backend to use ('whisper' or 'faster_whisper').
+
+    Returns:
+        None
+    """
     print(f"Processing file {file_path}")
     process_audio_and_subtitles(file_path, model_args, args, backend)
 
 
-def bazzar_flow(show, model_args, args, backend):
+def bazzar_flow(show: str, model_args: Dict[str, Any], args: Dict[str, Any], backend: str) -> None:
+    """Processes episodes needing subtitles from the Bazarr API.
+
+    Args:
+        show (str): The show name.
+        model_args (Dict[str, Any]): Model arguments for subtitle generation.
+        args (Dict[str, Any]): Additional arguments for subtitle generation.
+        backend (str): Backend to use ('whisper' or 'faster_whisper').
+
+    Returns:
+        None
+    """
     list_of_episodes_needing_subtitles = get_wanted_episodes(show)
     print(f"Found {list_of_episodes_needing_subtitles['total']} episodes needing subtitles.")
     for episode in list_of_episodes_needing_subtitles["data"]:
@@ -47,7 +92,19 @@ def bazzar_flow(show, model_args, args, backend):
 
 
 @measure_time
-def get_subtitles(audio_paths: list, output_dir: str, model_args: dict, transcribe_args: dict, backend: str):
+def get_subtitles(audio_paths: List[str], output_dir: str, model_args: Dict[str, Any], transcribe_args: Dict[str, Any], backend: str) -> Dict[str, str]:
+    """Generates subtitles for given audio files using the specified model.
+
+    Args:
+        audio_paths (List[str]): List of paths to the audio files.
+        output_dir (str): Directory to save the generated subtitle files.
+        model_args (Dict[str, Any]): Model arguments for subtitle generation.
+        transcribe_args (Dict[str, Any]): Transcription arguments for subtitle generation.
+        backend (str): Backend to use ('whisper' or 'faster_whisper').
+
+    Returns:
+        Dict[str, str]: A dictionary mapping audio file paths to generated subtitle file paths.
+    """
     if backend == 'whisper':
         model = WhisperAI(model_args, transcribe_args)
     else:
@@ -68,7 +125,15 @@ def get_subtitles(audio_paths: list, output_dir: str, model_args: dict, transcribe_args: dict, backend: str):
     return subtitles_path
 
 
-def process(args: dict):
+def process(args: Dict[str, Any]) -> None:
+    """Main entry point to determine which processing flow to use.
+
+    Args:
+        args (Dict[str, Any]): Dictionary of arguments including model, language, show, file, folder, and backend.
+
+    Returns:
+        None
+    """
     model_name: str = args.pop("model")
     language: str = args.pop("language")
     show: str = args.pop("show")
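As a quick orientation for readers of this change, the sketch below shows the shape of the argument dictionary the newly annotated process() appears to expect, based only on the keys named in its docstring (model, language, show, file, folder, backend) and the args.pop() calls visible in the last hunk. The concrete values, the helper name build_args, and the assumption about which key routes to which flow are illustrative, not part of this commit.

from typing import Any, Dict

def build_args(video_file: str) -> Dict[str, Any]:
    """Builds the kind of argument dict the annotated process() seems to expect."""
    return {
        "model": "base",        # Whisper model name (assumed value)
        "language": "en",       # transcription language (assumed value)
        "show": None,           # a show name would presumably route to bazzar_flow
        "file": video_file,     # a file path would presumably route to file_flow
        "folder": None,         # a folder path would presumably route to folder_flow
        "backend": "whisper",   # 'whisper' or 'faster_whisper', per the new docstrings
    }

if __name__ == "__main__":
    print(build_args("/media/example-episode.mkv"))  # hypothetical path
    # In the real project this dict would be passed to process(); the import is
    # omitted here because the module path is not shown in this diff.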