From 54afe4a6c60011c5e096d952a78beb1da2e4e645 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ahmet=20=C3=96ner?= <ahmetn8@gmail.com>
Date: Tue, 17 Dec 2024 01:18:44 +0100
Subject: [PATCH] Upgrade faster-whisper to v1.1.0

---
 CHANGELOG.md                    |  5 +++++
 docs/environmental-variables.md |  3 +--
 poetry.lock                     | 11 ++++++-----
 pyproject.toml                  |  2 +-
 4 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b2f280e..ea0099f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,11 @@ Unreleased
 
 - Refactor classes, Add comments, implement abstract methods, and add factory method for engine selection
 
+### Changed
+
+- Upgraded
+  - [SYSTRAN/faster-whisper](https://github.com/SYSTRAN/faster-whisper) to [v1.1.0](https://github.com/SYSTRAN/faster-whisper/releases/tag/v1.1.0)
+
 [1.6.0] (2024-10-06)
 --------------------
 
diff --git a/docs/environmental-variables.md b/docs/environmental-variables.md
index 422de63..d786927 100644
--- a/docs/environmental-variables.md
+++ b/docs/environmental-variables.md
@@ -15,8 +15,7 @@ export ASR_ENGINE=faster_whisper
 export ASR_MODEL=base
 ```
 
-Available ASR_MODELs are `tiny`, `base`, `small`, `medium`, `large`, `large-v1`, `large-v2`, `large-v3`, `turbo`(only
-OpenAI Whisper) and `large-v3-turbo`(only OpenAI Whisper).
+Available ASR_MODELs are `tiny`, `base`, `small`, `medium`, `large`, `large-v1`, `large-v2`, `large-v3`, `turbo` and `large-v3-turbo`.
 
 For English-only applications, the `.en` models tend to perform better, especially for the `tiny.en` and `base.en`
 models. We observed that the difference becomes less significant for the `small.en` and `medium.en` models.
diff --git a/poetry.lock b/poetry.lock
index 914b152..64f2599 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -375,21 +375,22 @@ standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "htt
 
 [[package]]
 name = "faster-whisper"
-version = "1.0.3"
+version = "1.1.0"
 description = "Faster Whisper transcription with CTranslate2"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "faster-whisper-1.0.3.tar.gz", hash = "sha256:1a145db86450b56aaa623c8df7d4ef86e8a1159900f60533e2890e98e8453a17"},
-    {file = "faster_whisper-1.0.3-py3-none-any.whl", hash = "sha256:364d0e378ab232ed26f39656e5c98548b38045224e206b20f7d8c90e2745b9d3"},
+    {file = "faster-whisper-1.1.0.tar.gz", hash = "sha256:cea4bba5d4527173fdbacafa56f2ffb17dd322688f6c3fdf5fd7b6b6c193ce17"},
+    {file = "faster_whisper-1.1.0-py3-none-any.whl", hash = "sha256:0f2d025676bbff1e46c4108b6f9a82578d6e33826c174af2990e45b33fab6182"},
 ]
 
 [package.dependencies]
-av = ">=11.0,<13"
+av = ">=11"
 ctranslate2 = ">=4.0,<5"
 huggingface-hub = ">=0.13"
 onnxruntime = ">=1.14,<2"
 tokenizers = ">=0.13,<1"
+tqdm = "*"
 
 [package.extras]
 conversion = ["transformers[torch] (>=4.23)"]
@@ -2003,4 +2004,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "3a007512112802c1d81dea2788a1e5523f701cf84ba97a163e7ac1882707e119"
+content-hash = "c190cb1d67e7b336841b59fbfa4e1df855724eaf804fd08c92e6e1e1a35840d6"
diff --git a/pyproject.toml b/pyproject.toml
index a362775..e1dd1e6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,7 +26,7 @@ fastapi = "^0.115.0"
 llvmlite = "^0.43.0"
 numba = "^0.60.0"
 openai-whisper = "^20240930"
-faster-whisper = "^1.0.3"
+faster-whisper = "^1.1.0"
 torch = [
     { markers = "sys_platform == 'darwin' and platform_machine == 'arm64'", url = "https://download.pytorch.org/whl/cpu/torch-1.13.1-cp310-none-macosx_11_0_arm64.whl" },
     { markers = "sys_platform == 'linux' and platform_machine == 'arm64'", url = "https://download.pytorch.org/whl/cpu/torch-1.13.1-cp310-none-macosx_11_0_arm64.whl" },
-- 
GitLab
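
As a quick smoke test of the upgraded dependency, the snippet below is a minimal sketch (not part of this patch) that loads a model through the upstream faster-whisper API and prints the transcription segments. The model size `base` mirrors the `ASR_MODEL` default from the docs above; `sample.wav` is a placeholder audio path.

```python
# Minimal sketch: confirm faster-whisper v1.1.0 imports and transcribes.
# "base" mirrors the ASR_MODEL default; "sample.wav" is a placeholder audio file.
from faster_whisper import WhisperModel

model = WhisperModel("base", device="cpu", compute_type="int8")
segments, info = model.transcribe("sample.wav", beam_size=5)

print(f"Detected language: {info.language} (probability {info.language_probability:.2f})")
for segment in segments:
    print(f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}")
```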