cli.py

import argparse

from faster_whisper import available_models

from .main import process
from .utils.convert import str2bool, str2timeinterval


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("video", nargs="+", type=str,
                        help="paths to video files to transcribe")
    parser.add_argument("--audio_channel", type=int, default=0,
                        help="audio channel index to use")
    parser.add_argument("--sample_interval", type=str2timeinterval, default=None,
                        help="generate subtitles for a specific fragment of the video "
                             "(e.g. 01:02:05-01:03:45)")
    parser.add_argument("--model", default="small", choices=available_models(),
                        help="name of the Whisper model to use")
    parser.add_argument("--device", type=str, default="auto",
                        choices=["cpu", "cuda", "auto"],
                        help="device to use for computation")
    # "default" keeps the type the model was saved with; the other values
    # quantize the model on load.
    parser.add_argument("--compute_type", type=str, default="default",
                        choices=["default", "int8", "int8_float32", "int8_float16",
                                 "int8_bfloat16", "int16", "float16",
                                 "bfloat16", "float32"],
                        help="type to use for computation; "
                             "see https://opennmt.net/CTranslate2/quantization.html")
    parser.add_argument("--output_dir", "-o", type=str, default=".",
                        help="directory to save the outputs")
    parser.add_argument("--output_srt", type=str2bool, default=False,
                        help="whether to output the .srt file along with the video files")
    parser.add_argument("--srt_only", type=str2bool, default=False,
                        help="only generate the .srt file, without creating the overlaid video")
    parser.add_argument("--beam_size", type=int, default=5,
                        help="number of beams used for decoding; larger values "
                             "can improve accuracy at the cost of speed")
    parser.add_argument("--no_speech_threshold", type=float, default=0.6,
                        help="probability threshold above which a segment may be "
                             "treated as silence and skipped")
    parser.add_argument("--condition_on_previous_text", type=str2bool, default=True,
                        help="whether to feed the previous output as a prompt for the "
                             "next window; disabling this can reduce repetition loops")
    parser.add_argument("--task", type=str, default="transcribe",
                        choices=["transcribe", "translate"],
                        help="whether to perform X->X speech recognition ('transcribe') "
                             "or X->English translation ('translate')")
    parser.add_argument("--language", type=str, default="auto",
                        choices=["auto", "af", "am", "ar", "as", "az", "ba", "be", "bg", "bn", "bo",
                                 "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "es", "et",
                                 "eu", "fa", "fi", "fo", "fr", "gl", "gu", "ha", "haw", "he", "hi",
                                 "hr", "ht", "hu", "hy", "id", "is", "it", "ja", "jw", "ka", "kk",
                                 "km", "kn", "ko", "la", "lb", "ln", "lo", "lt", "lv", "mg", "mi",
                                 "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne", "nl", "nn", "no",
                                 "oc", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sd", "si", "sk",
                                 "sl", "sn", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "tg",
                                 "th", "tk", "tl", "tr", "tt", "uk", "ur", "uz", "vi", "yi", "yo",
                                 "zh"],
                        help="original language of the video; detected automatically "
                             "if left as 'auto'")

    # Collect the parsed options into a plain dict and hand them to the pipeline.
    args = vars(parser.parse_args())
    process(args)


if __name__ == '__main__':
    main()
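
The str2bool and str2timeinterval converters come from .utils.convert, which is not shown on this page. As a rough sketch of what they plausibly do (hypothetical implementations, not this package's actual code): str2bool lets boolean options take an explicit value, so they are passed as --output_srt true rather than as bare switches, and str2timeinterval parses the HH:MM:SS-HH:MM:SS range format shown in the --sample_interval help.

import argparse


def str2bool(value):
    # Hypothetical sketch: accept common spellings of true/false.
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"boolean value expected, got {value!r}")


def str2timeinterval(value):
    # Hypothetical sketch: parse "HH:MM:SS-HH:MM:SS" into (start, end) seconds.
    def to_seconds(stamp):
        h, m, s = (int(part) for part in stamp.split(":"))
        return h * 3600 + m * 60 + s

    start, end = (to_seconds(part) for part in value.split("-"))
    if end <= start:
        raise argparse.ArgumentTypeError(f"empty interval: {value!r}")
    return start, end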
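
For reference, a sketch of how the entry point can be driven; the input file name and option values below are made up, while the flags are the ones defined above. From a shell this would correspond to something like python -m <package>.cli lecture.mp4 --model small --language en --srt_only true, with <package> standing in for the actual package name.

# Illustrative programmatic invocation; "lecture.mp4" is a made-up input file.
import sys

sys.argv = ["cli", "lecture.mp4", "--model", "small",
            "--language", "en", "--srt_only", "true"]
main()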