# syntax=docker/dockerfile:1

# CUDA 12.1 runtime base with cuDNN 8 — required for GPU inference with the
# cu121 torch wheels and faster-whisper below.
FROM nvidia/cuda:12.1.0-cudnn8-runtime-ubuntu22.04

# Install Python 3.12 from the deadsnakes PPA, plus ffmpeg (audio decoding)
# and git. --no-install-recommends keeps the layer lean; the apt lists are
# removed in the SAME layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        software-properties-common \
    && add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update && apt-get install -y --no-install-recommends \
        ffmpeg \
        git \
        python3-pip \
        python3.12 \
        python3.12-dev \
        python3.12-venv \
    && rm -rf /var/lib/apt/lists/*

# Make python3.12 answer to both `python` and `python3`.
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1

# Upgrade pip; --no-cache-dir keeps the wheel cache out of the layer.
RUN python -m pip install --no-cache-dir --upgrade pip

WORKDIR /app

# Copy only the dependency manifest first so the install layers below stay
# cached until requirements.txt itself changes.
COPY fast-whisper-mcp-server/requirements.txt .

# Install torch/torchaudio from the CUDA 12.1 wheel index in a DEDICATED step.
# The original combined command applied a single --index-url to every package
# in the command, so pip tried to resolve faster-whisper and mcp against
# download.pytorch.org (where they are not published) and the build failed.
RUN pip install --no-cache-dir \
        torch==2.5.1 \
        torchaudio==2.5.1 \
        --index-url https://download.pytorch.org/whl/cu121

# Remaining dependencies from PyPI, including requirements.txt — previously it
# was copied but never installed. "mcp[cli]" is quoted so the shell cannot
# expand the extras brackets as a glob.
# NOTE(review): if requirements.txt pins torch/torchaudio itself, this step
# could replace the cu121 builds installed above — confirm its contents.
RUN pip install --no-cache-dir \
        faster-whisper \
        "mcp[cli]" \
        -r requirements.txt

# Application code changes most often — copy it last for best cache reuse.
COPY fast-whisper-mcp-server/ .

# Model cache and transcription output directories, owned by a dedicated
# non-root user so the server does not have to run as root.
RUN mkdir -p /models /outputs \
    && groupadd --system app \
    && useradd --system --gid app --home /app app \
    && chown -R app:app /app /models /outputs

# Runtime configuration consumed by the whisper server (GPU, fp16 compute).
ENV WHISPER_MODEL_DIR=/models \
    TRANSCRIPTION_OUTPUT_DIR=/outputs \
    TRANSCRIPTION_MODEL=large-v3 \
    TRANSCRIPTION_DEVICE=cuda \
    TRANSCRIPTION_COMPUTE_TYPE=float16

# Drop root privileges before starting the server.
USER app

# Exec form: the server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "whisper_server.py"]