From b2089ae33bcb42a1bdab5d6b842bffc1198d961a Mon Sep 17 00:00:00 2001
From: Tanay Upadhyaya <tanayupadhyaya@gmail.com>
Date: Sun, 12 Mar 2023 00:43:32 +0530
Subject: [PATCH] Download the Whisper model and convert it for faster_whisper
 while building the Docker image

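Set a default ASR_MODEL ("base") in Dockerfile.gpu and run the new
faster_whisper_model_conversion.sh during the image build, so the model is
downloaded and converted to CTranslate2 format while the image is built.
Replace model_conversion.sh, which hard-coded whisper-large-v2, with a script
driven by ASR_MODEL. Also forward options_dict to model.transcribe() in the
faster_whisper path, switch the docker-compose cache mounts to named volumes,
and document `docker-compose up --build` in the README.
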
---
 Dockerfile.gpu                     |  5 +++++
 README.md                          |  5 +++++
 app/webservice.py                  |  3 +--
 docker-compose.yml                 | 14 ++++++++++----
 faster_whisper_model_conversion.sh |  3 +++
 model_conversion.sh                |  4 ----
 6 files changed, 24 insertions(+), 10 deletions(-)
 create mode 100755 faster_whisper_model_conversion.sh
 delete mode 100644 model_conversion.sh

diff --git a/Dockerfile.gpu b/Dockerfile.gpu
index 0d7f0e0..5227708 100644
--- a/Dockerfile.gpu
+++ b/Dockerfile.gpu
@@ -34,4 +34,9 @@ COPY . .
 RUN poetry install
 RUN $POETRY_VENV/bin/pip install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch
 
+ENV ASR_MODEL="base"
+
+# TODO: Skip based on ENV variable
+RUN ./faster_whisper_model_conversion.sh ${ASR_MODEL}
+
 CMD gunicorn --bind 0.0.0.0:9000 --workers 1 --timeout 0 app.webservice:app -k uvicorn.workers.UvicornWorker
diff --git a/README.md b/README.md
index 3491d98..720fb7e 100644
--- a/README.md
+++ b/README.md
@@ -70,6 +70,11 @@ Starting the Webservice:
 poetry run gunicorn --bind 0.0.0.0:9000 --workers 1 --timeout 0 app.webservice:app -k uvicorn.workers.UvicornWorker
 ```
 
+With Docker Compose:
+```sh
+docker-compose up --build
+```
+
 ## Quick start
 
 After running the docker image interactive Swagger API documentation is available at [localhost:9000/docs](http://localhost:9000/docs)
diff --git a/app/webservice.py b/app/webservice.py
index c209261..a4dd19f 100644
--- a/app/webservice.py
+++ b/app/webservice.py
@@ -137,11 +137,10 @@ def run_asr(
     with model_lock:   
         model = get_modal(faster)
         if faster:
-            # TODO: options_dict
             segments = []
             text = ""
             i = 0
-            segment_generator, info = model.transcribe(audio, beam_size=5)
+            segment_generator, info = model.transcribe(audio, beam_size=5, **options_dict)
             for segment in segment_generator:
                 segments.append(segment)
                 text = text + segment.text
diff --git a/docker-compose.yml b/docker-compose.yml
index 204b58e..b415375 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -13,12 +13,18 @@ services:
               count: 1
               capabilities: [gpu]
     environment:
-      - ASR_MODEL=large
+      - ASR_MODEL=base
     ports:
       - 9000:9000
     volumes:
       - ./app:/app/app
-      - ~/.cache/poetry:/root/.cache/poetry
-      - ~/.cache/whisper:/root/.cache/whisper
-      - ~/.cache/faster_whisper:/root/.cache/faster_whisper
+      - cache-pip:/root/.cache/pip
+      - cache-poetry:/root/.cache/poetry
+      - cache-whisper:/root/.cache/whisper
+      - cache-faster-whisper:/root/.cache/faster_whisper
 
+volumes:
+  cache-pip:
+  cache-poetry:
+  cache-whisper:
+  cache-faster-whisper:
diff --git a/faster_whisper_model_conversion.sh b/faster_whisper_model_conversion.sh
new file mode 100755
index 0000000..ab0671c
--- /dev/null
+++ b/faster_whisper_model_conversion.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+ct2-transformers-converter --model openai/whisper-"${1:-$ASR_MODEL}" --output_dir /root/.cache/faster_whisper --quantization float16
diff --git a/model_conversion.sh b/model_conversion.sh
deleted file mode 100644
index 07ca750..0000000
--- a/model_conversion.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-# TODO: Add step to build setup based on ENV variable
-ct2-transformers-converter --model openai/whisper-large-v2 --output_dir /root/.cache/faster_whisper --quantization float16
-- 
GitLab