cli.py

import os
import ffmpeg
import whisper
import argparse
import warnings
import tempfile
from .utils import filename, str2bool, write_srt
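
# Pipeline overview: extract a 16 kHz mono WAV from each input video, run
# Whisper to produce an .srt transcript, then (unless --srt_only is set)
# burn the subtitles back into an .mp4 with ffmpeg.
#
# Typical invocation, assuming the package installs a console-script entry
# point named `auto_subtitle` (the entry point is not defined in this file):
#   auto_subtitle video.mp4 --model small --output_srt True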


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("video", nargs="+", type=str,
                        help="paths to video files to transcribe")
    parser.add_argument("--model", default="small",
                        choices=whisper.available_models(), help="name of the Whisper model to use")
    parser.add_argument("--output_dir", "-o", type=str,
                        default=".", help="directory to save the outputs")
    parser.add_argument("--output_srt", type=str2bool, default=False,
                        help="whether to output the .srt file along with the video files")
    parser.add_argument("--srt_only", type=str2bool, default=False,
                        help="only generate the .srt file and not create an overlaid video")
    parser.add_argument("--verbose", type=str2bool, default=False,
                        help="whether to print out the progress and debug messages")

    parser.add_argument("--task", type=str, default="transcribe", choices=[
                        "transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
    parser.add_argument("--language", type=str, default="auto", choices=["auto","af","am","ar","as","az","ba","be","bg","bn","bo","br","bs","ca","cs","cy","da","de","el","en","es","et","eu","fa","fi","fo","fr","gl","gu","ha","haw","he","hi","hr","ht","hu","hy","id","is","it","ja","jw","ka","kk","km","kn","ko","la","lb","ln","lo","lt","lv","mg","mi","mk","ml","mn","mr","ms","mt","my","ne","nl","nn","no","oc","pa","pl","ps","pt","ro","ru","sa","sd","si","sk","sl","sn","so","sq","sr","su","sv","sw","ta","te","tg","th","tk","tl","tr","tt","uk","ur","uz","vi","yi","yo","zh"],
                        help="language of the original audio; if unset, it is detected automatically")
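
    # Pop the CLI-only options off the parsed-args dict; whatever remains is
    # forwarded verbatim to model.transcribe() as keyword arguments.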
    args = parser.parse_args().__dict__
    model_name: str = args.pop("model")
    output_dir: str = args.pop("output_dir")
    output_srt: bool = args.pop("output_srt")
    srt_only: bool = args.pop("srt_only")
    language: str = args.pop("language")

    os.makedirs(output_dir, exist_ok=True)

    if model_name.endswith(".en"):
        warnings.warn(
            f"{model_name} is an English-only model, forcing English detection.")
        args["language"] = "en"
    # if a language was specified explicitly, pass it through instead of auto-detecting
    elif language != "auto":
        args["language"] = language
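
    # Extract audio, then transcribe each track; get_subtitles() returns a
    # mapping of video path -> generated .srt path.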
    model = whisper.load_model(model_name)
    audios = get_audio(args.pop("video"))
    subtitles = get_subtitles(
        audios, output_srt or srt_only, output_dir, lambda audio_path: model.transcribe(audio_path, **args)
    )

    if srt_only:
        return
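
    # Burn the subtitles into each video with ffmpeg's `subtitles` filter,
    # keeping the original audio track, and write <output_dir>/<name>.mp4.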
    for path, srt_path in subtitles.items():
        out_path = os.path.join(output_dir, f"{filename(path)}.mp4")

        print(f"Adding subtitles to {filename(path)}...")

        video = ffmpeg.input(path)
        audio = video.audio

        ffmpeg.concat(
            video.filter('subtitles', srt_path, force_style="OutlineColour=&H40000000,BorderStyle=3"), audio, v=1, a=1
        ).output(out_path).run(quiet=True, overwrite_output=True)

        print(f"Saved subtitled video to {os.path.abspath(out_path)}.")


def get_audio(paths):
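    """Extract a 16 kHz mono 16-bit PCM WAV for each video into the temp directory.

    Returns a dict mapping each input video path to its extracted audio path.
    """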
    temp_dir = tempfile.gettempdir()

    audio_paths = {}

    for path in paths:
        print(f"Extracting audio from {filename(path)}...")
        output_path = os.path.join(temp_dir, f"{filename(path)}.wav")

        ffmpeg.input(path).output(
            output_path,
            acodec="pcm_s16le", ac=1, ar="16k"
        ).run(quiet=True, overwrite_output=True)

        audio_paths[path] = output_path

    return audio_paths


def get_subtitles(audio_paths: dict, output_srt: bool, output_dir: str, transcribe: callable):
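    """Run `transcribe` on each audio file and write the result as an .srt.

    Subtitles go to output_dir when output_srt is True, otherwise to the temp
    directory. Returns a dict mapping each video path to its .srt path.
    """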
    subtitles_path = {}

    for path, audio_path in audio_paths.items():
        srt_path = output_dir if output_srt else tempfile.gettempdir()
        srt_path = os.path.join(srt_path, f"{filename(path)}.srt")

        print(
            f"Generating subtitles for {filename(path)}... This might take a while."
        )

        warnings.filterwarnings("ignore")
        result = transcribe(audio_path)
        warnings.filterwarnings("default")

        with open(srt_path, "w", encoding="utf-8") as srt:
            write_srt(result["segments"], file=srt)

        subtitles_path[path] = srt_path

    return subtitles_path


if __name__ == '__main__':
    main()