From 5613e5bc8c1853d0453cb7ef259b29611c266de1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ahmet=20=C3=96ner?= <ahmet.oener@iais.fraunhofer.de>
Date: Sun, 1 Oct 2023 17:38:00 +0200
Subject: [PATCH] Update default model download paths to `~/.cache/whisper`

---
 CHANGELOG.md               |  6 ++++++
 README.md                  | 12 ++++++++++--
 app/faster_whisper/core.py |  2 +-
 app/openai_whisper/core.py |  6 ++++--
 docker-compose.gpu.yml     |  5 ++---
 docker-compose.yml         |  5 ++---
 6 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a4c8ce..5b8bcb4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,12 @@ Unreleased
 ### Updated
 
 - Updated model conversion method (for Faster Whisper) to use Hugging Face downloader
+- Updated default model download paths to `~/.cache/whisper`.
+  - For customization, modify the `ASR_MODEL_PATH` environment variable.
+  - Ensure Docker volume is set for the corresponding directory to use caching.
+    ```bash
+    docker run -d -p 9000:9000 -e ASR_MODEL_PATH=/data/whisper -v $PWD/yourlocaldir:/data/whisper onerahmet/openai-whisper-asr-webservice:latest
+    ```
 
 ### Changed
 
diff --git a/README.md b/README.md
index 857fff3..c714fe5 100644
--- a/README.md
+++ b/README.md
@@ -179,10 +179,18 @@ docker run -d --gpus all -p 9000:9000 -e ASR_MODEL=base whisper-asr-webservice-g
 ```
 
 ## Cache
-The ASR model is downloaded each time you start the container, using the large model this can take some time. If you want to decrease the time it takes to start your container by skipping the download, you can store the cache directory (/root/.cache/whisper) to an persistent storage. Next time you start your container the ASR Model will be taken from the cache instead of being downloaded again.
+The ASR model is downloaded each time you start the container; when using the large model, this can take some time.
+If you want to decrease the time it takes to start your container by skipping the download, you can store the cache directory (`~/.cache/whisper`) in persistent storage.
+Next time you start your container the ASR Model will be taken from the cache instead of being downloaded again.
 
 **Important this will prevent you from receiving any updates to the models.**
  
 ```sh
-docker run -d -p 9000:9000 -e ASR_MODEL=large -v //c/tmp/whisper:/root/.cache/whisper onerahmet/openai-whisper-asr-webservice:latest
+docker run -d -p 9000:9000 -v $PWD/yourlocaldir:/root/.cache/whisper onerahmet/openai-whisper-asr-webservice:latest
+```
+
+or
+
+```sh
+docker run -d -p 9000:9000 -e ASR_MODEL_PATH=/data/whisper -v $PWD/yourlocaldir:/data/whisper onerahmet/openai-whisper-asr-webservice:latest
 ```
diff --git a/app/faster_whisper/core.py b/app/faster_whisper/core.py
index b72ad5f..aa6563a 100644
--- a/app/faster_whisper/core.py
+++ b/app/faster_whisper/core.py
@@ -10,7 +10,7 @@ from faster_whisper import WhisperModel
 from .utils import ResultWriter, WriteTXT, WriteSRT, WriteVTT, WriteTSV, WriteJSON
 
 model_name = os.getenv("ASR_MODEL", "base")
-model_path = os.getenv("ASR_MODEL_PATH", "/root/.cache/whisper")
+model_path = os.getenv("ASR_MODEL_PATH", os.path.join(os.path.expanduser("~"), ".cache", "whisper"))
 
 if torch.cuda.is_available():
     model = WhisperModel(model_size_or_path=model_name, device="cuda", compute_type="float32", download_root=model_path)
diff --git a/app/openai_whisper/core.py b/app/openai_whisper/core.py
index 094ca06..df3af05 100644
--- a/app/openai_whisper/core.py
+++ b/app/openai_whisper/core.py
@@ -8,10 +8,12 @@ import whisper
 from whisper.utils import ResultWriter, WriteTXT, WriteSRT, WriteVTT, WriteTSV, WriteJSON
 
 model_name = os.getenv("ASR_MODEL", "base")
+model_path = os.getenv("ASR_MODEL_PATH", os.path.join(os.path.expanduser("~"), ".cache", "whisper"))
+
 if torch.cuda.is_available():
-    model = whisper.load_model(model_name).cuda()
+    model = whisper.load_model(model_name, download_root=model_path).cuda()
 else:
-    model = whisper.load_model(model_name)
+    model = whisper.load_model(model_name, download_root=model_path)
 model_lock = Lock()
 
 
diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml
index 5a75c8b..5003b08 100644
--- a/docker-compose.gpu.yml
+++ b/docker-compose.gpu.yml
@@ -15,13 +15,12 @@ services:
     environment:
       - ASR_MODEL=base
     ports:
-      - 9000:9000
+      - "9000:9000"
     volumes:
       - ./app:/app/app
       - cache-pip:/root/.cache/pip
       - cache-poetry:/root/.cache/poetry
-      - cache-whisper:/root/.cache/whisper
-      - cache-faster-whisper:/root/.cache/faster_whisper
+      - cache-whisper:/root/.cache/whisper
 
 volumes:
   cache-pip:
diff --git a/docker-compose.yml b/docker-compose.yml
index 3102c24..c363e68 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -8,13 +8,12 @@ services:
     environment:
       - ASR_MODEL=base
     ports:
-      - 9000:9000
+      - "9000:9000"
     volumes:
       - ./app:/app/app
       - cache-pip:/root/.cache/pip
       - cache-poetry:/root/.cache/poetry
-      - cache-whisper:/root/.cache/whisper
-      - cache-faster-whisper:/root/.cache/faster_whisper
+      - cache-whisper:/root/.cache/whisper
 
 volumes:
   cache-pip:
-- 
GitLab