init commit
788b2ea705
Dockerfile
@@ -0,0 +1,20 @@
FROM python:3.10-slim

RUN apt-get update && apt-get install -y \
    build-essential \
    ffmpeg \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy app and download model
COPY app ./app
COPY download-model.sh .
RUN chmod +x download-model.sh && ./download-model.sh

EXPOSE 4002
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "4002"]
app/main.py
@@ -0,0 +1,19 @@
from fastapi import FastAPI, File, UploadFile
import os
import shutil
from whispercpp import Whisper

app = FastAPI()

# Load the whisper.cpp model once at startup; the file is fetched by download-model.sh
model_path = "./app/model/ggml-base.en.bin"
whisper = Whisper(model_path)

@app.post("/transcribe")
async def transcribe_audio(audio: UploadFile = File(...)):
    # Write the upload to a temporary file so whisper.cpp can read it from disk
    temp_file = f"temp_{audio.filename}"
    with open(temp_file, "wb") as buffer:
        shutil.copyfileobj(audio.file, buffer)

    # Always remove the temp file, even if transcription fails
    try:
        text = whisper.transcribe(temp_file)
    finally:
        os.remove(temp_file)
    return {"text": text}
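A quick way to exercise the endpoint once the container is running (sample.wav is a placeholder for any local audio file; the multipart field name must match the audio parameter above):

    curl -F "audio=@sample.wav" http://localhost:4002/transcribe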
download-model.sh
@@ -0,0 +1,14 @@
#!/bin/bash
set -e  # abort the Docker build if mkdir, cd, or the download fails
mkdir -p app/model
cd app/model

# You can choose other sizes too: tiny.en, base.en, etc.
MODEL_NAME="ggml-base.en.bin"
MODEL_URL="https://huggingface.co/ggerganov/whisper.cpp/resolve/main/$MODEL_NAME"

if [ ! -f "$MODEL_NAME" ]; then
    echo "Downloading $MODEL_NAME..."
    curl -fL -o "$MODEL_NAME" "$MODEL_URL"  # -f: fail on HTTP errors instead of saving an error page
else
    echo "$MODEL_NAME already exists."
fi
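To pull a different size, as the comment above suggests, only MODEL_NAME needs to change; model_path in app/main.py must then point at the same file. For example, the tiny.en variant published in the same Hugging Face repo:

    MODEL_NAME="ggml-tiny.en.bin"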
@@ -0,0 +1,7 @@
whisper_cpp_api/
├── app/
│   ├── main.py
│   └── model/
├── Dockerfile
├── requirements.txt
└── download-model.sh
requirements.txt
@@ -0,0 +1,4 @@
fastapi
uvicorn
python-multipart
whispercpp