more robust ollama integration

tcsenpai 2025-05-23 10:28:57 +02:00
parent f020531160
commit 09d7d4d6bd
2 changed files with 60 additions and 9 deletions

app.py

@@ -52,6 +52,7 @@ AVAILABLE_LANGUAGES = config["languages"]["available_languages"].split(",")
 ollama = OllamaHandler()
 OLLAMA_AVAILABLE = ollama.is_available()
 OLLAMA_MODELS = ollama.get_available_models() if OLLAMA_AVAILABLE else []
+DEFAULT_OLLAMA_MODEL = ollama.get_default_model() if OLLAMA_AVAILABLE else None

 def load_model(model_name: str) -> WhisperModel:
@@ -221,14 +222,23 @@ def create_interface():
             )
             ollama_model_dropdown = gr.Dropdown(
                 choices=OLLAMA_MODELS,
-                value=OLLAMA_MODELS[0] if OLLAMA_MODELS else None,
+                value=DEFAULT_OLLAMA_MODEL,
                 label="Ollama Model",
                 visible=False,
             )
+
+            def toggle_summary(checked):
+                if checked and not ollama.is_available():
+                    gr.Warning(
+                        "Ollama is not available. Please check your Ollama server."
+                    )
+                    return {"value": False, "visible": False}
+                return {"value": checked, "visible": checked}
+
             summarize_checkbox.change(
-                fn=lambda x: {"visible": x},
+                fn=toggle_summary,
                 inputs=[summarize_checkbox],
-                outputs=[ollama_model_dropdown],
+                outputs=[summarize_checkbox, ollama_model_dropdown],
             )

         transcribe_btn = gr.Button("Transcribe", variant="primary")
@@ -317,14 +327,26 @@ def create_interface():
             )
             yt_ollama_model_dropdown = gr.Dropdown(
                 choices=OLLAMA_MODELS,
-                value=OLLAMA_MODELS[0] if OLLAMA_MODELS else None,
+                value=DEFAULT_OLLAMA_MODEL,
                 label="Ollama Model",
                 visible=False,
             )
+
+            def toggle_yt_summary(checked):
+                if checked and not ollama.is_available():
+                    gr.Warning(
+                        "Ollama is not available. Please check your Ollama server."
+                    )
+                    return {"value": False, "visible": False}
+                return {"value": checked, "visible": checked}
+
             yt_summarize_checkbox.change(
-                fn=lambda x: {"visible": x},
+                fn=toggle_yt_summary,
                 inputs=[yt_summarize_checkbox],
-                outputs=[yt_ollama_model_dropdown],
+                outputs=[
+                    yt_summarize_checkbox,
+                    yt_ollama_model_dropdown,
+                ],
             )

         yt_process_btn = gr.Button("Process Video", variant="primary")
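
Note: both handlers use the same pattern — the checkbox's change handler writes updates back to the checkbox itself as well as to the dropdown, so a checked box can be vetoed when the Ollama server is unreachable. A minimal self-contained sketch of that pattern, assuming a recent Gradio; is_backend_available() is a hypothetical stand-in for ollama.is_available(), and gr.update objects are used instead of the raw dicts in the commit:

import gradio as gr

# Hypothetical stand-in for ollama.is_available(); hardcoded "down" for demo purposes.
def is_backend_available() -> bool:
    return False

def toggle_summary(checked: bool):
    # If the backend is unreachable, veto the checkbox and keep the dropdown hidden.
    if checked and not is_backend_available():
        gr.Warning("Backend is not available.")
        return gr.update(value=False), gr.update(visible=False)
    # Otherwise mirror the checkbox state onto the dropdown's visibility.
    return gr.update(value=checked), gr.update(visible=checked)

with gr.Blocks() as demo:
    summarize = gr.Checkbox(label="Summarize")
    model_dropdown = gr.Dropdown(
        choices=["model-a", "model-b"], visible=False, label="Model"
    )
    # The handler targets both components: the checkbox itself and the dropdown.
    summarize.change(
        fn=toggle_summary, inputs=[summarize], outputs=[summarize, model_dropdown]
    )

if __name__ == "__main__":
    demo.launch()

Feeding the checkbox back into its own outputs is what makes the control self-correcting: the UI never shows summarization enabled while the backend is down.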


@@ -69,21 +69,50 @@ class OllamaHandler:
             logger.error(f"Error fetching Ollama models: {str(e)}")
             return []

+    def validate_model(self, model_name: str) -> tuple[bool, Optional[str]]:
+        """Validate if a model exists and return the first available model if not."""
+        available_models = self.get_available_models()
+        if not available_models:
+            return False, None
+        if model_name in available_models:
+            return True, model_name
+        logger.warning(
+            f"Model {model_name} not found in available models. Using first available model: {available_models[0]}"
+        )
+        return True, available_models[0]
+
+    def get_default_model(self) -> Optional[str]:
+        """Get the default model, falling back to first available if default is not found."""
+        if not self.is_available():
+            return None
+        is_valid, model = self.validate_model(self.default_model)
+        if is_valid:
+            return model
+        return None
+
     def summarize(self, text: str, model: Optional[str] = None) -> Optional[str]:
         """Summarize text using Ollama."""
         if not self.is_available():
             logger.warning("Attempted to summarize with Ollama unavailable")
             return None

-        model = model or self.default_model
+        # Validate and get the correct model
+        is_valid, valid_model = self.validate_model(model or self.default_model)
+        if not is_valid:
+            logger.error("No valid Ollama models available")
+            return None

         prompt = f"{self.prompt}\n\n{text}"
-        logger.info(f"Generating summary using model: {model}")
+        logger.info(f"Generating summary using model: {valid_model}")
         logger.info(f"Input text length: {len(text)} characters")

         try:
             response = requests.post(
                 f"{self.url}/api/generate",
-                json={"model": model, "prompt": prompt, "stream": False},
+                json={"model": valid_model, "prompt": prompt, "stream": False},
             )

             if response.status_code == 200:
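
For reference, the validate-then-fallback flow can be exercised against a local Ollama server directly. A minimal sketch, assuming Ollama's standard HTTP API (GET /api/tags lists installed models, POST /api/generate runs a completion) on the default http://localhost:11434; pick_model and the "llama3" default are illustrative names, not part of the committed code:

import requests
from typing import Optional

OLLAMA_URL = "http://localhost:11434"  # assumed default Ollama endpoint

def pick_model(preferred: str) -> Optional[str]:
    """Return `preferred` if installed, else fall back to the first installed model."""
    resp = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
    resp.raise_for_status()
    installed = [m["name"] for m in resp.json().get("models", [])]
    if not installed:
        return None  # mirrors validate_model's (False, None) case
    return preferred if preferred in installed else installed[0]

def summarize(text: str, preferred: str = "llama3") -> Optional[str]:
    model = pick_model(preferred)
    if model is None:
        return None  # no models installed; nothing to fall back to
    resp = requests.post(
        f"{OLLAMA_URL}/api/generate",
        json={"model": model, "prompt": f"Summarize:\n\n{text}", "stream": False},
        timeout=120,
    )
    if resp.status_code == 200:
        return resp.json().get("response")
    return None

As in validate_model, an unknown model name degrades to the first installed model rather than failing the request outright; only an empty model list aborts the summary.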