mirror of https://github.com/tcsenpai/youlama.git
synced 2025-06-07 03:35:41 +00:00
more robust ollama integration
This commit is contained in:
parent f020531160
commit 09d7d4d6bd
app.py: 34 changed lines
@@ -52,6 +52,7 @@ AVAILABLE_LANGUAGES = config["languages"]["available_languages"].split(",")
 ollama = OllamaHandler()
 OLLAMA_AVAILABLE = ollama.is_available()
 OLLAMA_MODELS = ollama.get_available_models() if OLLAMA_AVAILABLE else []
+DEFAULT_OLLAMA_MODEL = ollama.get_default_model() if OLLAMA_AVAILABLE else None


 def load_model(model_name: str) -> WhisperModel:
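The new module-level DEFAULT_OLLAMA_MODEL is resolved once at startup via the handler's fallback logic (see the OllamaHandler hunk below), so both model dropdowns share a single validated default instead of each independently taking OLLAMA_MODELS[0].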
@@ -221,14 +222,23 @@ def create_interface():
             )
             ollama_model_dropdown = gr.Dropdown(
                 choices=OLLAMA_MODELS,
-                value=OLLAMA_MODELS[0] if OLLAMA_MODELS else None,
+                value=DEFAULT_OLLAMA_MODEL,
                 label="Ollama Model",
                 visible=False,
             )

+            def toggle_summary(checked):
+                if checked and not ollama.is_available():
+                    gr.Warning(
+                        "Ollama is not available. Please check your Ollama server."
+                    )
+                    return {"value": False, "visible": False}
+                return {"value": checked, "visible": checked}
+
             summarize_checkbox.change(
-                fn=lambda x: {"visible": x},
+                fn=toggle_summary,
                 inputs=[summarize_checkbox],
-                outputs=[ollama_model_dropdown],
+                outputs=[summarize_checkbox, ollama_model_dropdown],
             )
             transcribe_btn = gr.Button("Transcribe", variant="primary")
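For context, a minimal standalone sketch of the checkbox-gates-dropdown pattern this hunk introduces: the change handler both reveals the model dropdown and can veto the checkbox itself when the server is down. The MODELS list and the server_available() stub are placeholders, and the handler returns explicit gr.update() objects (Gradio's conventional form for updating component properties) rather than the plain dicts used in the commit.

import gradio as gr

MODELS = ["llama3", "mistral"]  # placeholder model names


def server_available() -> bool:
    # Stand-in for ollama.is_available(); flip to False to see the veto path.
    return True


def toggle_summary(checked):
    if checked and not server_available():
        gr.Warning("Ollama is not available. Please check your Ollama server.")
        # Untick the checkbox and keep the dropdown hidden.
        return gr.update(value=False), gr.update(visible=False)
    return gr.update(value=checked), gr.update(visible=checked)


with gr.Blocks() as demo:
    summarize_checkbox = gr.Checkbox(label="Summarize with Ollama")
    ollama_model_dropdown = gr.Dropdown(
        choices=MODELS, value=MODELS[0], label="Ollama Model", visible=False
    )
    summarize_checkbox.change(
        fn=toggle_summary,
        inputs=[summarize_checkbox],
        outputs=[summarize_checkbox, ollama_model_dropdown],
    )

if __name__ == "__main__":
    demo.launch()

Listing the checkbox among its own outputs is what lets the handler untick it, which the old visibility-only lambda could not do.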
@@ -317,14 +327,26 @@ def create_interface():
             )
             yt_ollama_model_dropdown = gr.Dropdown(
                 choices=OLLAMA_MODELS,
-                value=OLLAMA_MODELS[0] if OLLAMA_MODELS else None,
+                value=DEFAULT_OLLAMA_MODEL,
                 label="Ollama Model",
                 visible=False,
             )

+            def toggle_yt_summary(checked):
+                if checked and not ollama.is_available():
+                    gr.Warning(
+                        "Ollama is not available. Please check your Ollama server."
+                    )
+                    return {"value": False, "visible": False}
+                return {"value": checked, "visible": checked}
+
             yt_summarize_checkbox.change(
-                fn=lambda x: {"visible": x},
+                fn=toggle_yt_summary,
                 inputs=[yt_summarize_checkbox],
-                outputs=[yt_ollama_model_dropdown],
+                outputs=[
+                    yt_summarize_checkbox,
+                    yt_ollama_model_dropdown,
+                ],
             )
             yt_process_btn = gr.Button("Process Video", variant="primary")
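This hunk mirrors the previous one for the YouTube tab: the same validated default and the same veto-capable toggle, with yt_summarize_checkbox added to the outputs so the handler can untick it when Ollama is unreachable (see the sketch above).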
@@ -69,21 +69,50 @@ class OllamaHandler:
             logger.error(f"Error fetching Ollama models: {str(e)}")
             return []

+    def validate_model(self, model_name: str) -> tuple[bool, Optional[str]]:
+        """Validate if a model exists and return the first available model if not."""
+        available_models = self.get_available_models()
+        if not available_models:
+            return False, None
+
+        if model_name in available_models:
+            return True, model_name
+
+        logger.warning(
+            f"Model {model_name} not found in available models. Using first available model: {available_models[0]}"
+        )
+        return True, available_models[0]
+
+    def get_default_model(self) -> Optional[str]:
+        """Get the default model, falling back to first available if default is not found."""
+        if not self.is_available():
+            return None
+
+        is_valid, model = self.validate_model(self.default_model)
+        if is_valid:
+            return model
+        return None
+
     def summarize(self, text: str, model: Optional[str] = None) -> Optional[str]:
         """Summarize text using Ollama."""
         if not self.is_available():
             logger.warning("Attempted to summarize with Ollama unavailable")
             return None

-        model = model or self.default_model
+        # Validate and get the correct model
+        is_valid, valid_model = self.validate_model(model or self.default_model)
+        if not is_valid:
+            logger.error("No valid Ollama models available")
+            return None
+
         prompt = f"{self.prompt}\n\n{text}"
-        logger.info(f"Generating summary using model: {model}")
+        logger.info(f"Generating summary using model: {valid_model}")
         logger.info(f"Input text length: {len(text)} characters")

         try:
             response = requests.post(
                 f"{self.url}/api/generate",
-                json={"model": model, "prompt": prompt, "stream": False},
+                json={"model": valid_model, "prompt": prompt, "stream": False},
             )

             if response.status_code == 200:
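The validate-then-generate flow added above can be exercised outside the handler class. The following is a minimal sketch under stated assumptions: OLLAMA_URL and the "llama3" fallback default are placeholders, /api/tags is Ollama's standard model-listing endpoint (the handler's get_available_models presumably wraps something similar), and the /api/generate request body matches the one in the diff.

from typing import Optional

import requests

OLLAMA_URL = "http://localhost:11434"  # assumed default Ollama address


def get_available_models() -> list[str]:
    # Ask the Ollama server which models are installed (/api/tags).
    try:
        resp = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        resp.raise_for_status()
        return [m["name"] for m in resp.json().get("models", [])]
    except requests.RequestException:
        return []


def validate_model(model_name: str) -> tuple[bool, Optional[str]]:
    # Same contract as OllamaHandler.validate_model: (False, None) when no
    # models are installed, otherwise the requested model or the first one.
    available = get_available_models()
    if not available:
        return False, None
    if model_name in available:
        return True, model_name
    return True, available[0]  # silent fallback; the real handler logs a warning


def summarize(text: str, model: Optional[str] = None) -> Optional[str]:
    is_valid, valid_model = validate_model(model or "llama3")  # placeholder default
    if not is_valid:
        return None
    resp = requests.post(
        f"{OLLAMA_URL}/api/generate",
        json={"model": valid_model, "prompt": f"Summarize:\n\n{text}", "stream": False},
    )
    if resp.status_code == 200:
        return resp.json().get("response")  # non-streaming replies carry the text here
    return None

Note that validate_model() only reports failure when no models are installed at all; an unknown requested name falls back to the first installed model, which is the "more robust" behavior the commit title refers to.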