responsive style

tcsenpai 2024-12-25 20:58:12 +01:00
parent c952e9b537
commit 5fc59a9610
2 changed files with 458 additions and 415 deletions

src/assets/style.css (new file, +182 lines)

@@ -0,0 +1,182 @@
/* Base theme */
:root {
--primary-color: #00b4d8;
--bg-color: #1b1b1b;
--card-bg: #2d2d2d;
--text-color: #f0f0f0;
--border-color: #404040;
--hover-color: #90e0ef;
}
/* Main container */
.stApp {
background-color: var(--bg-color);
color: var(--text-color);
}
/* Responsive container */
.stApp > div:nth-child(2) {
padding: 2rem !important;
max-width: 1200px;
margin: 0 auto;
}
@media (min-width: 768px) {
.stApp > div:nth-child(2) {
padding: 2rem 5rem !important;
}
}
/* Headers */
h1,
h2,
h3,
h4,
h5,
h6 {
color: white !important;
font-weight: 600 !important;
margin-bottom: 1rem !important;
}
/* Input fields */
.stTextInput input,
.stSelectbox select {
background-color: var(--card-bg) !important;
color: var(--text-color) !important;
border: 1px solid var(--border-color) !important;
border-radius: 8px;
padding: 0.75rem 1rem;
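/* clamp(min, preferred, max): font size tracks 2vw but never leaves the 14px-16px range */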
font-size: clamp(14px, 2vw, 16px);
transition: all 0.3s;
width: 100% !important;
}
.stTextInput input:focus,
.stSelectbox select:focus {
border-color: var(--primary-color) !important;
box-shadow: 0 0 0 2px rgba(114, 137, 218, 0.2);
}
/* Buttons */
.stButton button {
background: linear-gradient(45deg, var(--primary-color), #8ea1e1) !important;
color: white !important;
border: none !important;
border-radius: 8px !important;
padding: 0.75rem 1.5rem !important;
font-weight: 600 !important;
width: 100% !important;
transition: all 0.3s !important;
font-size: clamp(14px, 2vw, 16px);
}
.stButton button:hover {
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(114, 137, 218, 0.3);
}
/* Cards */
.card {
background-color: var(--card-bg);
border-radius: 12px;
padding: clamp(1rem, 3vw, 1.5rem);
border: 1px solid var(--border-color);
margin-bottom: 1rem;
transition: all 0.3s;
}
.card:hover {
border-color: var(--primary-color);
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}
/* Expander */
.streamlit-expanderHeader {
background-color: var(--card-bg) !important;
color: var(--text-color) !important;
border: 1px solid var(--border-color) !important;
border-radius: 8px !important;
padding: 1rem !important;
font-size: clamp(14px, 2vw, 16px);
}
.streamlit-expanderContent {
border: none !important;
padding: 1rem 0 0 0 !important;
}
/* Status messages */
.stSuccess,
.stInfo,
.stWarning,
.stError {
background-color: var(--card-bg) !important;
color: var(--text-color) !important;
border: 1px solid var(--border-color) !important;
border-radius: 8px !important;
padding: 1rem !important;
font-size: clamp(14px, 2vw, 16px);
}
/* Hide Streamlit branding */
#MainMenu {
visibility: hidden;
}
footer {
visibility: hidden;
}
/* Column spacing */
[data-testid="column"] {
padding: 0.5rem !important;
}
/* Checkbox and radio */
.stCheckbox,
.stRadio {
font-size: clamp(14px, 2vw, 16px);
}
/* Mobile optimizations */
@media (max-width: 768px) {
.stButton button {
padding: 0.5rem 1rem !important;
}
[data-testid="column"] {
padding: 0.25rem !important;
}
.card {
padding: 1rem;
}
}
/* Section container styling */
.stTextInput,
.stSelectbox,
.stButton {
background-color: var(--card-bg);
border-radius: 8px;
padding: 1rem;
margin-bottom: 1rem;
}
.streamlit-expanderHeader {
background-color: var(--card-bg) !important;
border-radius: 8px !important;
margin-bottom: 1rem;
}
.streamlit-expanderContent {
background-color: var(--card-bg);
border-radius: 8px;
padding: 1rem !important;
margin-top: 0.5rem;
}
/* Add spacing between sections */
.stMarkdown {
margin-bottom: 1.5rem;
}
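The stylesheet above is injected into the app at runtime. A minimal sketch of that pattern (mirroring the load_css helper that appears later in this diff; the asset path is assumed to be src/assets/style.css relative to the script):
from pathlib import Path
import streamlit as st
def load_css() -> None:
    # Read the bundled stylesheet and inject it as an inline <style> block.
    css_file = Path(__file__).parent / "assets" / "style.css"
    st.markdown(f"<style>{css_file.read_text()}</style>", unsafe_allow_html=True)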


@@ -13,236 +13,18 @@ from pathlib import Path
# Load environment variables
load_dotenv()
# Set page config for favicon
# Set page config first, before any other st commands
st.set_page_config(
page_title="YouTube Summarizer by TCSenpai",
page_title="YouTube Video Companion by TCSenpai",
page_icon="src/assets/subtitles.png",
layout="wide", # This ensures full width
layout="wide",
)
# Add custom CSS with a modern, clean design
st.markdown(
"""
<style>
/* Base theme */
:root {
--primary-color: #7289da;
--bg-color: #1a1b1e;
--card-bg: #2c2d30;
--text-color: #e0e0e0;
--border-color: #404246;
--hover-color: #3a3b3e;
}
/* Main container */
.stApp {
background-color: var(--bg-color);
color: var(--text-color);
}
/* Fix container width */
.stApp > header {
background-color: var(--bg-color);
}
.stApp > div:nth-child(2) {
padding-left: 5rem !important;
padding-right: 5rem !important;
}
/* Headers */
h1, h2, h3, h4, h5, h6 {
color: white !important;
font-weight: 600 !important;
margin-bottom: 1rem !important;
}
/* Input fields */
.stTextInput input, .stSelectbox select {
background-color: var(--bg-color) !important;
color: var(--text-color) !important;
border: 1px solid var(--border-color) !important;
border-radius: 8px;
padding: 12px 16px;
font-size: 16px;
transition: all 0.3s;
width: 100% !important;
}
/* Buttons */
.stButton button {
background: linear-gradient(45deg, var(--primary-color), #8ea1e1) !important;
color: white !important;
border: none !important;
border-radius: 8px !important;
padding: 12px 24px !important;
font-weight: 600 !important;
width: 100% !important;
transition: all 0.3s !important;
}
.stButton button:hover {
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(114,137,218,0.3);
}
/* Settings cards */
.settings-card {
background-color: var(--card-bg);
border-radius: 12px;
padding: 1.5rem;
border: 1px solid var(--border-color);
margin-bottom: 1rem;
}
/* Remove default container styling */
.element-container {
margin: 0 !important;
padding: 0 !important;
}
/* Clean up expander */
.streamlit-expanderHeader {
background-color: var(--card-bg) !important;
color: var(--text-color) !important;
border: 1px solid var(--border-color) !important;
border-radius: 8px !important;
padding: 1rem !important;
}
.streamlit-expanderContent {
border: none !important;
padding: 1rem 0 0 0 !important;
}
/* Status messages */
.stSuccess, .stInfo, .stWarning, .stError {
background-color: var(--card-bg) !important;
color: var(--text-color) !important;
border: 1px solid var(--border-color) !important;
border-radius: 8px !important;
padding: 1rem !important;
}
/* Hide Streamlit branding */
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
/* Improve spacing */
[data-testid="column"] {
padding: 0 !important;
margin: 0 1rem !important;
}
/* Video URL input container */
.video-input-container {
background-color: var(--card-bg);
border-radius: 12px;
padding: 1.5rem;
border: 1px solid var(--border-color);
margin: 2rem 0;
}
/* Summary results container */
.results-container {
background-color: var(--card-bg);
border-radius: 12px;
padding: 1.5rem;
border: 1px solid var(--border-color);
margin-top: 2rem;
}
</style>
""",
unsafe_allow_html=True,
)
# Initialize session state for messages if not exists
if "messages" not in st.session_state:
st.session_state.messages = []
# Initialize session state for rephrased transcript if not exists
if "rephrased_transcript" not in st.session_state:
st.session_state.rephrased_transcript = None
# Create a single header container
header = st.container()
def show_warning(message):
update_header("⚠️ " + message)
def show_error(message):
update_header("🚫 " + message)
def show_info(message):
update_header("> " + message)
def update_header(message):
with header:
st.markdown(
f"""
<div class='fixed-header'>
{message}
</div>
<style>
div.fixed-header {{
position: fixed;
top: 2.875rem;
left: 0;
right: 0;
z-index: 999;
padding: 10px;
margin: 0 1rem;
border-radius: 0.5rem;
border: 1px solid rgba(128, 128, 128, 0.2);
height: 45px !important;
background-color: rgba(40, 40, 40, 0.95);
backdrop-filter: blur(5px);
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
transition: all 0.3s ease;
display: flex;
align-items: center;
}}
</style>
""",
unsafe_allow_html=True,
)
# Initialize the header with a ready message
update_header("✅ Ready to summarize!")
# Add spacing after the fixed header
# st.markdown("<div style='margin-top: 120px;'></div>", unsafe_allow_html=True)
def get_transcript(video_id):
cache_dir = "transcript_cache"
cache_file = os.path.join(cache_dir, f"{video_id}.json")
# Create cache directory if it doesn't exist
os.makedirs(cache_dir, exist_ok=True)
# Check if transcript is cached
if os.path.exists(cache_file):
with open(cache_file, "r") as f:
return json.load(f)["transcript"]
try:
transcript = YouTubeTranscriptApi.get_transcript(video_id)
full_transcript = " ".join([entry["text"] for entry in transcript])
# Cache the transcript
with open(cache_file, "w") as f:
json.dump({"transcript": full_transcript}, f)
return full_transcript
except Exception as e:
print(f"Error fetching transcript: {e}")
return None
def load_css():
css_file = Path(__file__).parent / "assets" / "style.css"
with open(css_file) as f:
st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
def get_ollama_models(ollama_url):
@@ -251,205 +33,54 @@ def get_ollama_models(ollama_url):
return models
def summarize_video(
video_url,
model,
ollama_url,
fallback_to_whisper=True,
force_whisper=False,
use_po_token=None,
):
video_id = None
# Extract the video id from the URL (works for YouTube, Invidious, or any other URL that contains a video id)
if "v=" in video_url:
video_id = video_url.split("v=")[-1]
# Support short urls as well
elif "youtu.be/" in video_url:
video_id = video_url.split("youtu.be/")[-1]
# Also cut out any part of the url after the video id
video_id = video_id.split("&")[0]
st.write(f"Video ID: {video_id}")
with st.spinner("Fetching transcript..."):
transcript = get_transcript(video_id)
show_info("Summarizer fetched successfully!")
# Forcing whisper if specified
if force_whisper:
show_warning("Forcing whisper...")
fallback_to_whisper = True
transcript = None
if not transcript:
print("No transcript found, trying to download audio...")
if not fallback_to_whisper:
print("Fallback to whisper is disabled")
return "Unable to fetch transcript (and fallback to whisper is disabled)"
if not force_whisper:
show_warning("Unable to fetch transcript. Trying to download audio...")
try:
print("Downloading audio...")
download_audio(video_url, use_po_token=use_po_token)
show_info("Audio downloaded successfully!")
show_warning("Starting transcription...it might take a while...")
transcript = transcribe("downloads/output.m4a")
show_info("Transcription completed successfully!")
os.remove("downloads/output.m4a")
except Exception as e:
print(f"Error downloading audio or transcribing: {e}")
show_error(f"Error downloading audio or transcribing: {e}")
if os.path.exists("downloads/output.m4a"):
os.remove("downloads/output.m4a")
return "Unable to fetch transcript."
print(f"Transcript: {transcript}")
ollama_client = OllamaClient(ollama_url, model)
show_info(f"Ollama client created with model: {model}")
show_warning("Starting summary generation, this might take a while...")
with st.spinner("Generating summary..."):
prompt = f"Summarize the following YouTube video transcript in a concise yet detailed manner:\n\n```{transcript}```\n\nSummary with introduction and conclusion formatted in markdown:"
summary = ollama_client.generate(prompt)
print(summary)
show_info("Summary generated successfully!")
with st.spinner("Fetching video info..."):
video_info = get_video_info(video_id)
st.success("Video info fetched successfully!")
return {
"title": video_info["title"],
"channel": video_info["channel"],
"transcript": transcript,
"summary": summary,
}
def fix_transcript(
video_url,
model,
ollama_url,
fallback_to_whisper=True,
force_whisper=False,
use_po_token=None,
):
video_id = None
# Extract the video id from the URL (works for YouTube, Invidious, or any other URL that contains a video id)
if "v=" in video_url:
video_id = video_url.split("v=")[-1]
# Support short urls as well
elif "youtu.be/" in video_url:
video_id = video_url.split("youtu.be/")[-1]
# Also cut out any part of the url after the video id
video_id = video_id.split("&")[0]
st.write(f"Video ID: {video_id}")
with st.spinner("Fetching transcript..."):
transcript = get_transcript(video_id)
show_info("Transcript fetched successfully!")
# Forcing whisper if specified
if force_whisper:
show_warning("Forcing whisper...")
fallback_to_whisper = True
transcript = None
if not transcript:
print("No transcript found, trying to download audio...")
if not fallback_to_whisper:
print("Fallback to whisper is disabled")
return "Unable to fetch transcript (and fallback to whisper is disabled)"
if not force_whisper:
show_warning("Unable to fetch transcript. Trying to download audio...")
try:
print("Downloading audio...")
download_audio(video_url, use_po_token=use_po_token)
show_info("Audio downloaded successfully!")
show_warning("Starting transcription...it might take a while...")
transcript = transcribe("downloads/output.m4a")
show_info("Transcription completed successfully!")
os.remove("downloads/output.m4a")
except Exception as e:
print(f"Error downloading audio or transcribing: {e}")
show_error(f"Error downloading audio or transcribing: {e}")
if os.path.exists("downloads/output.m4a"):
os.remove("downloads/output.m4a")
return "Unable to fetch transcript."
ollama_client = OllamaClient(ollama_url, model)
show_info(f"Ollama client created with model: {model}")
show_warning("Starting transcript enhancement...")
with st.spinner("Enhancing transcript..."):
prompt = f"""Fix the grammar and punctuation of the following transcript, maintaining the exact same content and meaning.
Only correct grammatical errors, add proper punctuation, and fix sentence structure where needed.
Do not rephrase or change the content:\n\n{transcript}"""
enhanced = ollama_client.generate(prompt)
show_info("Transcript enhanced successfully!")
with st.spinner("Fetching video info..."):
video_info = get_video_info(video_id)
st.success("Video info fetched successfully!")
return {
"title": video_info["title"],
"channel": video_info["channel"],
"transcript": transcript,
"enhanced": enhanced,
}
def main():
# Settings section
st.write("## AI Video Summarizer")
# Load CSS
load_css()
# Ollama Settings - single card
with st.container():
st.subheader("🎯 Ollama Settings")
default_ollama_url = os.getenv("OLLAMA_URL")
ollama_url = st.text_input(
"Ollama URL",
value=default_ollama_url,
placeholder="Enter Ollama URL",
)
if not ollama_url:
ollama_url = default_ollama_url
st.write("#### YouTube Video Companion")
available_models = get_ollama_models(ollama_url)
default_model = os.getenv("OLLAMA_MODEL")
if default_model not in available_models:
available_models.append(default_model)
# Ollama Settings section
# st.subheader("🎯 Ollama Settings")
selected_model = st.selectbox(
"Model",
options=available_models,
index=(
available_models.index(default_model)
if default_model in available_models
else 0
),
)
default_ollama_url = os.getenv("OLLAMA_URL")
ollama_url = st.text_input(
"Ollama URL",
value=default_ollama_url,
placeholder="Enter Ollama URL",
)
if not ollama_url:
ollama_url = default_ollama_url
# Video URL input section
with st.container():
# URL in its own row
video_url = st.text_input(
"🎥 Video URL",
placeholder="https://www.youtube.com/watch?v=...",
)
available_models = get_ollama_models(ollama_url)
default_model = os.getenv("OLLAMA_MODEL")
if default_model not in available_models:
available_models.append(default_model)
# Buttons in a separate row
col1, col2 = st.columns(2)
selected_model = st.selectbox(
"Model",
options=available_models,
index=(
available_models.index(default_model)
if default_model in available_models
else 0
),
)
with col1:
summarize_button = st.button("🚀 Summarize", use_container_width=True)
# Video URL and buttons section
video_url = st.text_input(
"🎥 Video URL",
placeholder="https://www.youtube.com/watch?v=...",
)
with col2:
read_button = st.button("📖 Read", use_container_width=True)
col1, col2 = st.columns(2)
with col1:
summarize_button = st.button("🚀 Summarize", use_container_width=True)
with col2:
read_button = st.button("📖 Read", use_container_width=True)
# Advanced settings in collapsible sections
# Advanced settings section
with st.expander("⚙️ Advanced Settings", expanded=False):
col1, col2 = st.columns(2)
with col1:
fallback_to_whisper = st.checkbox(
"Fallback to Whisper",
@@ -461,14 +92,244 @@ def main():
value=False,
help="Always use Whisper for transcription",
)
with col2:
use_po_token = st.checkbox(
"Use PO Token",
value=get_po_token_setting(), # Default from environment
value=get_po_token_setting(),
help="Use PO token for YouTube authentication (helps bypass restrictions)",
)
# Initialize session state for messages if not exists
if "messages" not in st.session_state:
st.session_state.messages = []
# Initialize session state for rephrased transcript if not exists
if "rephrased_transcript" not in st.session_state:
st.session_state.rephrased_transcript = None
# Create a single header container
header = st.container()
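# The helpers below overwrite this container with a fixed, translucent banner so status text stays visible while the page scrolls.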
def show_warning(message):
update_header("⚠️ " + message)
def show_error(message):
update_header("🚫 " + message)
def show_info(message):
update_header("> " + message)
def update_header(message):
with header:
st.markdown(
f"""
<div class='fixed-header'>
{message}
</div>
<style>
div.fixed-header {{
position: fixed;
top: 2.875rem;
left: 0;
right: 0;
z-index: 999;
padding: 10px;
margin: 0 1rem;
border-radius: 0.5rem;
border: 1px solid rgba(128, 128, 128, 0.2);
height: 45px !important;
background-color: rgba(40, 40, 40, 0.95);
backdrop-filter: blur(5px);
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
transition: all 0.3s ease;
display: flex;
align-items: center;
}}
</style>
""",
unsafe_allow_html=True,
)
# Initialize the header with a ready message
update_header("✅ Ready to summarize!")
# Add spacing after the fixed header
# st.markdown("<div style='margin-top: 120px;'></div>", unsafe_allow_html=True)
def get_transcript(video_id):
cache_dir = "transcript_cache"
cache_file = os.path.join(cache_dir, f"{video_id}.json")
# Create cache directory if it doesn't exist
os.makedirs(cache_dir, exist_ok=True)
# Check if transcript is cached
if os.path.exists(cache_file):
with open(cache_file, "r") as f:
return json.load(f)["transcript"]
try:
transcript = YouTubeTranscriptApi.get_transcript(video_id)
full_transcript = " ".join([entry["text"] for entry in transcript])
# Cache the transcript
with open(cache_file, "w") as f:
json.dump({"transcript": full_transcript}, f)
return full_transcript
except Exception as e:
print(f"Error fetching transcript: {e}")
return None
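# Illustrative usage (hypothetical id): get_transcript("dQw4w9WgXcQ") returns the full
# caption text, or None on failure; results are cached as transcript_cache/<video_id>.json
# so repeated runs for the same video skip the YouTube transcript API.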
def summarize_video(
video_url,
model,
ollama_url,
fallback_to_whisper=True,
force_whisper=False,
use_po_token=None,
):
video_id = None
# Extract the video id from the URL (works for YouTube, Invidious, or any other URL that contains a video id)
if "v=" in video_url:
video_id = video_url.split("v=")[-1]
# Support short urls as well
elif "youtu.be/" in video_url:
video_id = video_url.split("youtu.be/")[-1]
# Also cut out any part of the url after the video id
video_id = video_id.split("&")[0]
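# An alternative, more defensive extraction (illustrative sketch) could parse the query
# string with the standard library instead of splitting on "v=":
#   from urllib.parse import urlparse, parse_qs
#   video_id = parse_qs(urlparse(video_url).query).get("v", [video_id])[0]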
st.write(f"Video ID: {video_id}")
with st.spinner("Fetching transcript..."):
transcript = get_transcript(video_id)
show_info("Summarizer fetched successfully!")
# Forcing whisper if specified
if force_whisper:
show_warning("Forcing whisper...")
fallback_to_whisper = True
transcript = None
if not transcript:
print("No transcript found, trying to download audio...")
if not fallback_to_whisper:
print("Fallback to whisper is disabled")
return (
"Unable to fetch transcript (and fallback to whisper is disabled)"
)
if not force_whisper:
show_warning("Unable to fetch transcript. Trying to download audio...")
try:
print("Downloading audio...")
download_audio(video_url, use_po_token=use_po_token)
show_info("Audio downloaded successfully!")
show_warning("Starting transcription...it might take a while...")
transcript = transcribe("downloads/output.m4a")
show_info("Transcription completed successfully!")
os.remove("downloads/output.m4a")
except Exception as e:
print(f"Error downloading audio or transcribing: {e}")
show_error(f"Error downloading audio or transcribing: {e}")
if os.path.exists("downloads/output.m4a"):
os.remove("downloads/output.m4a")
return "Unable to fetch transcript."
print(f"Transcript: {transcript}")
ollama_client = OllamaClient(ollama_url, model)
show_info(f"Ollama client created with model: {model}")
show_warning("Starting summary generation, this might take a while...")
with st.spinner("Generating summary..."):
prompt = f"Summarize the following YouTube video transcript in a concise yet detailed manner:\n\n```{transcript}```\n\nSummary with introduction and conclusion formatted in markdown:"
summary = ollama_client.generate(prompt)
print(summary)
show_info("Summary generated successfully!")
with st.spinner("Fetching video info..."):
video_info = get_video_info(video_id)
st.success("Video info fetched successfully!")
return {
"title": video_info["title"],
"channel": video_info["channel"],
"transcript": transcript,
"summary": summary,
}
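# Illustrative usage (hypothetical wiring; the actual call sits behind the Summarize button in main()):
#   result = summarize_video(video_url, selected_model, ollama_url)
#   if isinstance(result, dict):  # on failure the function returns an error string instead
#       st.markdown(f"## {result['title']} ({result['channel']})")
#       st.markdown(result["summary"])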
def fix_transcript(
video_url,
model,
ollama_url,
fallback_to_whisper=True,
force_whisper=False,
use_po_token=None,
):
video_id = None
# Extract the video id from the URL (works for YouTube, Invidious, or any other URL that contains a video id)
if "v=" in video_url:
video_id = video_url.split("v=")[-1]
# Support short urls as well
elif "youtu.be/" in video_url:
video_id = video_url.split("youtu.be/")[-1]
# Also cut out any part of the url after the video id
video_id = video_id.split("&")[0]
st.write(f"Video ID: {video_id}")
with st.spinner("Fetching transcript..."):
transcript = get_transcript(video_id)
show_info("Transcript fetched successfully!")
# Forcing whisper if specified
if force_whisper:
show_warning("Forcing whisper...")
fallback_to_whisper = True
transcript = None
if not transcript:
print("No transcript found, trying to download audio...")
if not fallback_to_whisper:
print("Fallback to whisper is disabled")
return (
"Unable to fetch transcript (and fallback to whisper is disabled)"
)
if not force_whisper:
show_warning("Unable to fetch transcript. Trying to download audio...")
try:
print("Downloading audio...")
download_audio(video_url, use_po_token=use_po_token)
show_info("Audio downloaded successfully!")
show_warning("Starting transcription...it might take a while...")
transcript = transcribe("downloads/output.m4a")
show_info("Transcription completed successfully!")
os.remove("downloads/output.m4a")
except Exception as e:
print(f"Error downloading audio or transcribing: {e}")
show_error(f"Error downloading audio or transcribing: {e}")
if os.path.exists("downloads/output.m4a"):
os.remove("downloads/output.m4a")
return "Unable to fetch transcript."
ollama_client = OllamaClient(ollama_url, model)
show_info(f"Ollama client created with model: {model}")
show_warning("Starting transcript enhancement...")
with st.spinner("Enhancing transcript..."):
prompt = f"""Fix the grammar and punctuation of the following transcript, maintaining the exact same content and meaning.
Only correct grammatical errors, add proper punctuation, and fix sentence structure where needed.
Do not rephrase or change the content:\n\n{transcript}"""
enhanced = ollama_client.generate(prompt)
show_info("Transcript enhanced successfully!")
with st.spinner("Fetching video info..."):
video_info = get_video_info(video_id)
st.success("Video info fetched successfully!")
return {
"title": video_info["title"],
"channel": video_info["channel"],
"transcript": transcript,
"enhanced": enhanced,
}
if (summarize_button or read_button) and video_url:
if read_button:
# Enhance transcript (now called read)