diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a4c8ceef1e1188c3b7d106e43962c2e2205fb11..5b8bcb415b9f8a18dd1980c1a51b671ff7e919c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,12 @@ Unreleased
 ### Updated
 
 - Updated model conversion method (for Faster Whisper) to use Hugging Face downloader
+- Updated default model paths to `~/.cache/whisper`. 
+  - For customization, modify the `ASR_MODEL_PATH` environment variable. 
+  - Ensure Docker volume is set for the corresponding directory to use caching.
+    ```bash
+    docker run -d -p 9000:9000 -e ASR_MODEL_PATH=/data/whisper -v ./yourlocaldir:/data/whisper onerahmet/openai-whisper-asr-webservice:latest
+    ```
 
 ### Changed
 
diff --git a/README.md b/README.md
index 857fff3e21fafe242a47356ca6ae4da13b9123a1..c714fe522114082fd06ac5577ed3c8aab4a432ef 100644
--- a/README.md
+++ b/README.md
@@ -179,10 +179,18 @@ docker run -d --gpus all -p 9000:9000 -e ASR_MODEL=base whisper-asr-webservice-g
 ```
 
 ## Cache
-The ASR model is downloaded each time you start the container, using the large model this can take some time. If you want to decrease the time it takes to start your container by skipping the download, you can store the cache directory (/root/.cache/whisper) to an persistent storage. Next time you start your container the ASR Model will be taken from the cache instead of being downloaded again.
+The ASR model is downloaded each time you start the container, using the large model this can take some time. 
+If you want to decrease the time it takes to start your container by skipping the download, you can store the cache directory (`~/.cache/whisper`) to a persistent storage. 
+Next time you start your container the ASR Model will be taken from the cache instead of being downloaded again.
 
 **Important this will prevent you from receiving any updates to the models.**
  
 ```sh
-docker run -d -p 9000:9000 -e ASR_MODEL=large -v //c/tmp/whisper:/root/.cache/whisper onerahmet/openai-whisper-asr-webservice:latest
+docker run -d -p 9000:9000 -v ./yourlocaldir:/root/.cache/whisper onerahmet/openai-whisper-asr-webservice:latest
+```
+
+or
+
+```sh
+docker run -d -p 9000:9000 -e ASR_MODEL_PATH=/data/whisper -v ./yourlocaldir:/data/whisper onerahmet/openai-whisper-asr-webservice:latest
 ```
diff --git a/app/faster_whisper/core.py b/app/faster_whisper/core.py
index b72ad5f766053e4caa1829b961c365b88d382ed0..aa6563ad2c41cea6f0de4adc6632869ad22d0606 100644
--- a/app/faster_whisper/core.py
+++ b/app/faster_whisper/core.py
@@ -10,7 +10,7 @@ from faster_whisper import WhisperModel
 from .utils import ResultWriter, WriteTXT, WriteSRT, WriteVTT, WriteTSV, WriteJSON
 
 model_name = os.getenv("ASR_MODEL", "base")
-model_path = os.getenv("ASR_MODEL_PATH", "/root/.cache/whisper")
+model_path = os.getenv("ASR_MODEL_PATH", os.path.join(os.path.expanduser("~"), ".cache", "whisper"))
 
 if torch.cuda.is_available():
     model = WhisperModel(model_size_or_path=model_name, device="cuda", compute_type="float32", download_root=model_path)
diff --git a/app/openai_whisper/core.py b/app/openai_whisper/core.py
index 094ca069796542347cd566489da6f8c9fde42e2c..df3af05f583b78fbffec120f7fa431241d6f865b 100644
--- a/app/openai_whisper/core.py
+++ b/app/openai_whisper/core.py
@@ -8,10 +8,12 @@ import whisper
 from whisper.utils import ResultWriter, WriteTXT, WriteSRT, WriteVTT, WriteTSV, WriteJSON
 
 model_name = os.getenv("ASR_MODEL", "base")
+model_path = os.getenv("ASR_MODEL_PATH", os.path.join(os.path.expanduser("~"), ".cache", "whisper"))
+
 if torch.cuda.is_available():
-    model = whisper.load_model(model_name).cuda()
+    model = whisper.load_model(model_name, download_root=model_path).cuda()
 else:
-    model = whisper.load_model(model_name)
+    model = whisper.load_model(model_name, download_root=model_path)
 model_lock = Lock()
 
 
diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml
index 5a75c8bdb2f2c74533fc08ab8253465a1c788351..5003b08dff827b21f4977bfee2ad81c64aea9808 100644
--- a/docker-compose.gpu.yml
+++ b/docker-compose.gpu.yml
@@ -15,13 +15,12 @@ services:
     environment:
       - ASR_MODEL=base
     ports:
-      - 9000:9000
+      - "9000:9000"
     volumes:
       - ./app:/app/app
       - cache-pip:/root/.cache/pip
       - cache-poetry:/root/.cache/poetry
-      - cache-whisper:/root/.cache/whisper
-      - cache-faster-whisper:/root/.cache/faster_whisper
+      - cache-whisper:/root/.cache/whisper
 
 volumes:
   cache-pip:
diff --git a/docker-compose.yml b/docker-compose.yml
index 3102c24568006f21a9a15ffec862001d39f70ecf..c363e6884377efbc39808c28db454b69e0386969 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -8,13 +8,12 @@ services:
     environment:
       - ASR_MODEL=base
     ports:
-      - 9000:9000
+      - "9000:9000"
     volumes:
       - ./app:/app/app
       - cache-pip:/root/.cache/pip
       - cache-poetry:/root/.cache/poetry
-      - cache-whisper:/root/.cache/whisper
-      - cache-faster-whisper:/root/.cache/faster_whisper
+      - cache-whisper:/root/.cache/whisper
 
 volumes:
   cache-pip: