slightly better system prompt

tcsenpai 2024-11-03 15:28:48 +01:00
parent 08afab9d02
commit 14db15dd77
2 changed files with 16 additions and 4 deletions


@@ -110,11 +110,15 @@ def summarize_video(
         transcript = None
     if not transcript:
         print("No transcript found, trying to download audio...")
+        if not fallback_to_whisper:
+            print("Fallback to whisper is disabled")
+            return "Unable to fetch transcript (and fallback to whisper is disabled)"
         if not force_whisper:
             print("Force whisper is disabled")
         st.warning("Unable to fetch transcript. Trying to download audio...")
         try:
             print("Downloading audio...")
             download_audio(video_url)
             st.success("Audio downloaded successfully!")
             st.warning("Starting transcription...it might take a while...")
@@ -122,18 +126,20 @@
             st.success("Transcription completed successfully!")
             os.remove("downloads/output.m4a")
         except Exception as e:
-            print(f"Error downloading audio or transcribing: {e}")
+            st.error(f"Error downloading audio or transcribing: {e}")
+            if os.path.exists("downloads/output.m4a"):
+                os.remove("downloads/output.m4a")
             return "Unable to fetch transcript."
     print(f"Transcript: {transcript}")
     ollama_client = OllamaClient(ollama_url, model)
     st.success(f"Ollama client created with model: {model}")
     st.warning("Starting summary generation, this might take a while...")
     with st.spinner("Generating summary..."):
-        prompt = f"Summarize the following YouTube video transcript:\n\n{transcript}\n\nSummary:"
+        prompt = f"Summarize the following YouTube video transcript in a concise yet detailed manner:\n\n```{transcript}```\n\nSummary with introduction and conclusion formatted in markdown:"
         summary = ollama_client.generate(prompt)
         print(summary)
         st.success("Summary generated successfully!")
     with st.spinner("Fetching video info..."):


@@ -11,10 +11,16 @@ class OllamaClient:
     def __init__(self, base_url, model):
         self.base_url = base_url
         self.model = model
-        self.context_size_table = {"llama3.1": 128000, "mistral-nemo": 128000}
+        self.context_size_table = {
+            "llama3.1": 128000,
+            "mistral-nemo": 128000,
+            "mistral_small_obliterated_22b": 128000,
+        }
         self.context_size = 2048
         if self.model not in self.context_size_table:
-            print(f"Model {self.model} not found in context size table: using default {self.context_size}")
+            print(
+                f"Model {self.model} not found in context size table: using default {self.context_size}"
+            )
         else:
             self.context_size = self.context_size_table[self.model]
             print(f"Using context size {self.context_size} for model {self.model}")