fixed whisper shenanigans

tcsenpai 2025-05-23 11:48:37 +02:00
parent 4ad72ffe8d
commit 02f580d195
2 changed files with 4 additions and 4 deletions

app.py

@@ -96,7 +96,7 @@ OLLAMA_MODELS = ollama.get_available_models() if OLLAMA_AVAILABLE else []
 DEFAULT_OLLAMA_MODEL = ollama.get_default_model() if OLLAMA_AVAILABLE else None
 
 
-def load_model(model_name: str) -> whisperx.WhisperModel:
+def load_model(model_name: str):
     """Load the WhisperX model with the specified configuration."""
     try:
         logger.info(f"Loading WhisperX model: {model_name}")

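Note on the app.py change: the old signature annotated the return value as whisperx.WhisperModel, a name that whisperx does not appear to export at its top level (the WhisperModel class belongs to faster-whisper), so the annotation is simply dropped. A minimal sketch of how the loader might look after this change, assuming whisperx.load_model and a module-level logger; the device and compute_type values are illustrative, not taken from the commit:

import logging

import whisperx

logger = logging.getLogger(__name__)


def load_model(model_name: str):
    """Load the WhisperX model with the specified configuration."""
    try:
        logger.info(f"Loading WhisperX model: {model_name}")
        # whisperx.load_model returns a transcription pipeline object rather
        # than a class named WhisperModel, hence no return annotation.
        return whisperx.load_model(model_name, device="cpu", compute_type="int8")
    except Exception as exc:
        logger.error(f"Failed to load WhisperX model {model_name}: {exc}")
        raise
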
requirements.txt

@@ -1,9 +1,9 @@
 gradio>=4.0.0
 # Choose one of these whisper implementations:
 whisperx>=3.0.0
-torch>=2.0.0,<2.1.0
-torchvision>=0.15.0,<0.16.0
-torchaudio>=2.0.0,<2.1.0
+torch>=2.0.0
+torchvision>=0.15.0
+torchaudio>=2.0.0
 yt-dlp>=2023.0.0
 python-dotenv>=1.0.0
 requests>=2.31.0
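
Note on the requirements change: only the upper version caps on the torch stack are removed (torch <2.1.0, torchvision <0.16.0, torchaudio <2.1.0); the minimum versions and every other dependency stay the same. A quick, hypothetical sanity check, not part of the commit, that can be run after reinstalling to confirm pip resolved a mutually compatible trio under the relaxed pins:

import torch
import torchaudio
import torchvision

# Report the versions pip actually resolved now that the upper bounds are gone.
print("torch:", torch.__version__)
print("torchvision:", torchvision.__version__)
print("torchaudio:", torchaudio.__version__)
print("CUDA available:", torch.cuda.is_available())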