mirror of https://github.com/tcsenpai/youlama.git
synced 2025-06-07 03:35:41 +00:00

commit 14db15dd77
parent 08afab9d02

slightly better system prompt

src/main.py (10)
@@ -110,11 +110,15 @@ def summarize_video(
     transcript = None

     if not transcript:
+        print("No transcript found, trying to download audio...")
         if not fallback_to_whisper:
+            print("Fallback to whisper is disabled")
             return "Unable to fetch transcript (and fallback to whisper is disabled)"
         if not force_whisper:
+            print("Force whisper is disabled")
             st.warning("Unable to fetch transcript. Trying to download audio...")
         try:
+            print("Downloading audio...")
             download_audio(video_url)
             st.success("Audio downloaded successfully!")
             st.warning("Starting transcription...it might take a while...")
@@ -122,18 +126,20 @@ def summarize_video(
             st.success("Transcription completed successfully!")
             os.remove("downloads/output.m4a")
         except Exception as e:
+            print(f"Error downloading audio or transcribing: {e}")
             st.error(f"Error downloading audio or transcribing: {e}")
             if os.path.exists("downloads/output.m4a"):
                 os.remove("downloads/output.m4a")
             return "Unable to fetch transcript."
+    print(f"Transcript: {transcript}")
     ollama_client = OllamaClient(ollama_url, model)
     st.success(f"Ollama client created with model: {model}")

     st.warning("Starting summary generation, this might take a while...")
     with st.spinner("Generating summary..."):
-        prompt = f"Summarize the following YouTube video transcript:\n\n{transcript}\n\nSummary:"
+        prompt = f"Summarize the following YouTube video transcript in a concise yet detailed manner:\n\n```{transcript}```\n\nSummary with introduction and conclusion formatted in markdown:"
         summary = ollama_client.generate(prompt)
+        print(summary)
     st.success("Summary generated successfully!")

     with st.spinner("Fetching video info..."):
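
For context, a rough standalone sketch of what the reworked prompt looks like once the f-string is rendered. The prompt wording and the idea of passing the result to ollama_client.generate come from the diff above; the build_prompt helper and the example transcript text are made up for illustration and are not code from the repo:

    # Minimal sketch: rendering the new summarization prompt outside the app.
    # build_prompt is a hypothetical helper; in main.py the f-string is built inline.
    def build_prompt(transcript: str) -> str:
        return (
            "Summarize the following YouTube video transcript in a concise yet detailed manner:"
            f"\n\n```{transcript}```\n\n"
            "Summary with introduction and conclusion formatted in markdown:"
        )

    # Made-up transcript text, just to show how the prompt wraps it in a fenced block.
    example_transcript = "Hi everyone, today we compare three local models for summarization..."
    print(build_prompt(example_transcript))

The main change is that the transcript is now wrapped in triple backticks and the model is asked for a markdown-formatted summary with an introduction and conclusion, instead of a bare "Summary:" continuation.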
@@ -11,10 +11,16 @@ class OllamaClient:
     def __init__(self, base_url, model):
         self.base_url = base_url
         self.model = model
-        self.context_size_table = {"llama3.1": 128000, "mistral-nemo": 128000}
+        self.context_size_table = {
+            "llama3.1": 128000,
+            "mistral-nemo": 128000,
+            "mistral_small_obliterated_22b": 128000,
+        }
         self.context_size = 2048
         if self.model not in self.context_size_table:
-            print(f"Model {self.model} not found in context size table: using default {self.context_size}")
+            print(
+                f"Model {self.model} not found in context size table: using default {self.context_size}"
+            )
         else:
             self.context_size = self.context_size_table[self.model]
             print(f"Using context size {self.context_size} for model {self.model}")
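
As a side note, here is a self-contained sketch of the lookup-with-default pattern this hunk expands. The table entries and the 2048 default are taken from the diff; resolve_context_size is a hypothetical helper written only to show the behavior, not code in the repo:

    # Sketch of the context-size selection after this change: known models get
    # their table value, anything else falls back to the 2048 default.
    CONTEXT_SIZE_TABLE = {
        "llama3.1": 128000,
        "mistral-nemo": 128000,
        "mistral_small_obliterated_22b": 128000,
    }
    DEFAULT_CONTEXT_SIZE = 2048

    def resolve_context_size(model: str) -> int:
        # Mirrors the branch in OllamaClient.__init__ shown above.
        if model not in CONTEXT_SIZE_TABLE:
            print(f"Model {model} not found in context size table: using default {DEFAULT_CONTEXT_SIZE}")
            return DEFAULT_CONTEXT_SIZE
        size = CONTEXT_SIZE_TABLE[model]
        print(f"Using context size {size} for model {model}")
        return size

    resolve_context_size("mistral-nemo")       # prints the 128000 message
    resolve_context_size("some-other-model")   # prints the default-2048 message

The commit itself only adds one table entry and reflows the dict and the print over multiple lines; the fallback logic is unchanged.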