Mirror of https://github.com/karl0ss/bazarr-ai-sub-generator.git (synced 2025-04-26 22:59:23 +01:00)
modularize a bit more
commit 830d2a3157
parent 967bd97992
@@ -11,57 +11,43 @@ from utils.whisper import WhisperAI
 from utils.decorator import measure_time
 
 
+def process_audio_and_subtitles(file_path, model_args, args, backend):
+    try:
+        audios = get_audio([file_path], 0, None)
+        subtitles = get_subtitles(audios, tempfile.gettempdir(), model_args, args, backend)
+        add_subtitles_to_mp4(subtitles)
+        time.sleep(5)
+    except Exception as ex:
+        print(f"Skipping file {file_path} due to - {ex}")
+
+
 def folder_flow(folder, model_args, args, backend):
-    print(f"Processing {folder}")
+    print(f"Processing folder {folder}")
     files = os.listdir(folder)
     for file in files:
-        print(f"processing {file}")
-        path = folder+file
-        try:
-            audios = get_audio([path], 0, None)
-            subtitles = get_subtitles(audios, tempfile.gettempdir(), model_args, args, backend)
-
-            add_subtitles_to_mp4(subtitles)
-            time.sleep(5)
-        except Exception as ex:
-            print(f"skipping file due to - {ex}")
+        path = os.path.join(folder, file)
+        print(f"Processing file {path}")
+        process_audio_and_subtitles(path, model_args, args, backend)
 
 
-def file_flow(show, model_args, args, backend):
-    print(f"Processing {show}")
-    try:
-        audios = get_audio([show], 0, None)
-        subtitles = get_subtitles(audios, tempfile.gettempdir(), model_args, args, backend)
-        add_subtitles_to_mp4(subtitles)
-        time.sleep(5)
-    except Exception as ex:
-        print(f"skipping file due to - {ex}")
+def file_flow(file_path, model_args, args, backend):
+    print(f"Processing file {file_path}")
+    process_audio_and_subtitles(file_path, model_args, args, backend)
 
 
 def bazzar_flow(show, model_args, args, backend):
     list_of_episodes_needing_subtitles = get_wanted_episodes(show)
-    print(
-        f"Found {list_of_episodes_needing_subtitles['total']} episodes needing subtitles."
-    )
+    print(f"Found {list_of_episodes_needing_subtitles['total']} episodes needing subtitles.")
     for episode in list_of_episodes_needing_subtitles["data"]:
         print(f"Processing {episode['seriesTitle']} - {episode['episode_number']}")
         episode_data = get_episode_details(episode["sonarrEpisodeId"])
-        try:
-            audios = get_audio([episode_data["path"]], 0, None)
-            subtitles = get_subtitles(audios, tempfile.gettempdir(), model_args, args, backend)
-
-            add_subtitles_to_mp4(subtitles)
-            update_show_in_sonarr(episode["sonarrSeriesId"])
-            time.sleep(5)
-            sync_series()
-        except Exception as ex:
-            print(f"skipping file due to - {ex}")
+        process_audio_and_subtitles(episode_data["path"], model_args, args, backend)
+        update_show_in_sonarr(episode["sonarrSeriesId"])
+        sync_series()
 
 
 @measure_time
-def get_subtitles(
-    audio_paths: list, output_dir: str, model_args: dict, transcribe_args: dict, backend: str
-):
+def get_subtitles(audio_paths: list, output_dir: str, model_args: dict, transcribe_args: dict, backend: str):
     if backend == 'whisper':
         model = WhisperAI(model_args, transcribe_args)
     else:
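For context, a minimal usage sketch of the helper this hunk extracts; it is not part of the commit. The import path, media path, device and language values below are assumptions for illustration, and it assumes the module's own functions (get_audio, get_subtitles, add_subtitles_to_mp4) are importable.

# Illustrative sketch only (not from the commit): driving the extracted helper
# directly for a single file. The module path below is a guess.
from bazarr_ai_sub_generator.main import process_audio_and_subtitles  # hypothetical import

model_args = {"device": "cuda"}       # forwarded to the WhisperAI constructor when backend == 'whisper'
transcribe_args = {"language": "en"}  # remaining args are used as transcribe options
process_audio_and_subtitles("/media/tv/example/S01E01.mkv", model_args, transcribe_args, "whisper")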
@@ -83,7 +69,6 @@ def get_subtitles(
 
-
 def process(args: dict):
 
     model_name: str = args.pop("model")
     language: str = args.pop("language")
     show: str = args.pop("show")
@@ -92,16 +77,12 @@ def process(args: dict):
     backend: str = args.pop("backend")
 
     if model_name.endswith(".en"):
-        warnings.warn(
-            f"{model_name} is an English-only model, forcing English detection."
-        )
+        warnings.warn(f"{model_name} is an English-only model, forcing English detection.")
         args["language"] = "en"
-    # if translate task used and language argument is set, then use it
     elif language != "auto":
         args["language"] = language
 
-    model_args = {}
-    model_args["device"] = args.pop("device")
+    model_args = {"device": args.pop("device")}
 
     if file:
         file_flow(file, model_args, args, backend)
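For orientation, a hedged sketch of the kind of args dict process() consumes after this change. Only the model, language, show, backend and device keys (and the file variable) are visible in these hunks, so the "file" key and every value here are assumptions.

# Hypothetical invocation of process(); keys mirror the args.pop(...) calls
# visible in the diff. Any keys left over after the pops are passed on as
# transcribe arguments.
args = {
    "model": "small.en",   # an ".en" model forces args["language"] = "en"
    "language": "auto",
    "show": None,
    "file": "/media/tv/example/S01E01.mkv",
    "backend": "whisper",
    "device": "cuda",
}
process(args)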