diff --git a/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py b/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py
index 4c29ee1..cccf4b7 100644
--- a/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py
+++ b/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py
@@ -1,17 +1,22 @@
# 23.11.24
-import re
-import logging
from typing import Dict, Any, List, Union
class Episode:
def __init__(self, data: Dict[str, Any]):
- self.id: int = data.get('id', '')
- self.number: int = data.get('number', '')
- self.name: str = data.get('name', '')
- self.plot: str = data.get('plot', '')
- self.duration: int = data.get('duration', '')
+ self.images = None
+ self.data = data
+
+ self.id: int = data.get('id')
+ self.scws_id: int = data.get('scws_id')
+ self.number: int = data.get('number')
+ self.name: str = data.get('name')
+ self.plot: str = data.get('plot')
+ self.duration: int = data.get('duration')
+
+ def collect_image(self, SITE_NAME, domain):
+ self.image = f"https://cdn.{SITE_NAME}.{domain}/images/{self.data.get('images')[0]['filename']}"
def __str__(self):
return f"Episode(id={self.id}, number={self.number}, name='{self.name}', plot='{self.plot}', duration={self.duration} sec)"
@@ -20,7 +25,7 @@ class EpisodeManager:
def __init__(self):
self.episodes: List[Episode] = []
- def add_episode(self, episode_data: Dict[str, Any]):
+ def add(self, episode_data: Dict[str, Any]):
"""
Add a new episode to the manager.
@@ -29,8 +34,20 @@ class EpisodeManager:
"""
episode = Episode(episode_data)
self.episodes.append(episode)
+
+ def get(self, index: int) -> Episode:
+ """
+ Retrieve an episode by its index in the episodes list.
+
+ Parameters:
+ - index (int): The zero-based index of the episode to retrieve.
+
+ Returns:
+ Episode: The Episode object at the specified index.
+ """
+ return self.episodes[index]
- def get_length(self) -> int:
+ def length(self) -> int:
"""
Get the number of episodes in the manager.
@@ -54,61 +71,23 @@ class EpisodeManager:
class Season:
def __init__(self, season_data: Dict[str, Union[int, str, None]]):
+ self.images = {}
+ self.season_data = season_data
+
self.id: int = season_data.get('id')
+ self.scws_id: int = season_data.get('scws_id')
+ self.imdb_id: int = season_data.get('imdb_id')
self.number: int = season_data.get('number')
self.name: str = season_data.get('name')
+ self.slug: str = season_data.get('slug')
self.plot: str = season_data.get('plot')
- self.episodes_count: int = season_data.get('episodes_count')
-
- def __str__(self):
- return f"Season(id={self.id}, number={self.number}, name='{self.name}', plot='{self.plot}', episodes_count={self.episodes_count})"
-
-class SeasonManager:
- def __init__(self):
- self.seasons: List[Season] = []
-
- def add_season(self, season_data: Dict[str, Union[int, str, None]]):
- """
- Add a new season to the manager.
-
- Parameters:
- season_data (Dict[str, Union[int, str, None]]): A dictionary containing data for the new season.
- """
- season = Season(season_data)
- self.seasons.append(season)
-
- def get(self, index: int) -> Season:
- """
- Get a season item from the list by index.
-
- Parameters:
- index (int): The index of the seasons item to retrieve.
-
- Returns:
- Season: The media item at the specified index.
- """
- return self.media_list[index]
-
- def get_length(self) -> int:
- """
- Get the number of seasons in the manager.
-
- Returns:
- int: Number of seasons.
- """
- return len(self.seasons)
-
- def clear(self) -> None:
- """
- This method clears the seasons list.
-
- Parameters:
- self: The object instance.
- """
- self.seasons.clear()
-
- def __str__(self):
- return f"SeasonManager(num_seasons={len(self.seasons)})"
+ self.type: str = season_data.get('type')
+ self.seasons_count: int = season_data.get('seasons_count')
+ self.episodes: EpisodeManager = EpisodeManager()
+
+ def collect_images(self, SITE_NAME, domain):
+ for dict_image in self.season_data.get('images'):
+ self.images[dict_image.get('type')] = f"https://cdn.{SITE_NAME}.{domain}/images/{dict_image.get('filename')}"
class Stream:
diff --git a/StreamingCommunity/Api/Player/vixcloud.py b/StreamingCommunity/Api/Player/vixcloud.py
index 6bc4bd8..73e6da6 100644
--- a/StreamingCommunity/Api/Player/vixcloud.py
+++ b/StreamingCommunity/Api/Player/vixcloud.py
@@ -120,8 +120,7 @@ class VideoSource:
response.raise_for_status()
except Exception as e:
- print("\n")
- console.print(Panel("[red bold]Coming soon", title="Notification", title_align="left", border_style="yellow"))
+ logging.error(f"Failed to get vixcloud contente with error: {e}")
sys.exit(0)
# Parse response with BeautifulSoup to get content
@@ -169,6 +168,56 @@ class VideoSource:
# Construct the new URL with updated query parameters
return urlunparse(parsed_url._replace(query=query_string))
+ def get_mp4(self, url_to_download: str, scws_id: str) -> list:
+ """
+ Generate download links for the specified resolutions from StreamingCommunity.
+
+ Args:
+ url_to_download (str): URL of the video page.
+ scws_id (str): SCWS ID of the title.
+
+ Returns:
+ dict | list: A dict with 'url' and 'quality' for the selected (highest-quality) track, or an empty list on failure.
+ """
+ headers = {
+ 'referer': url_to_download,
+ 'user-agent': get_headers(),
+ }
+
+ # API request to get video details
+ video_api_url = f'https://{self.base_name}.{self.domain}/api/video/{scws_id}'
+ response = httpx.get(video_api_url, headers=headers)
+
+ if response.status_code == 200:
+ response_json = response.json()
+
+ video_tracks = response_json.get('video_tracks', [])
+ track = video_tracks[-1]
+ console.print(f"[cyan]Available resolutions: [red]{[str(track['quality']) for track in video_tracks]}")
+
+ # Request download link generation for the highest-quality track only (last entry of video_tracks)
+ download_response = httpx.post(
+ url=f'https://{self.base_name}.{self.domain}/api/download/generate_link?scws_id={track["video_id"]}&rendition={track["quality"]}',
+ headers={
+ 'referer': url_to_download,
+ 'user-agent': get_headers(),
+ 'x-xsrf-token': config_manager.get("SITE", self.base_name)['extra']['x-xsrf-token']
+ },
+ cookies={
+ 'streamingcommunity_session': config_manager.get("SITE", self.base_name)['extra']['streamingcommunity_session']
+ }
+ )
+
+ if download_response.status_code == 200:
+ return {'url': download_response.text, 'quality': track["quality"]}
+
+ else:
+ logging.error(f"Failed to generate link for resolution {track['quality']} (HTTP {download_response.status_code}).")
+
+ else:
+ logging.error(f"Error fetching video API URL (HTTP {response.status_code}).")
+ return []
+
class VideoSourceAnime(VideoSource):
def __init__(self, site_name: str):
@@ -221,4 +270,4 @@ class VideoSourceAnime(VideoSource):
except Exception as e:
logging.error(f"Error fetching embed URL: {e}")
- return None
+ return None
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/__init__.py b/StreamingCommunity/Api/Site/streamingcommunity/__init__.py
index fe9ee78..badc78e 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/__init__.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/__init__.py
@@ -19,6 +19,7 @@ _useFor = "film_serie"
_deprecate = False
_priority = 1
_engineDownload = "hls"
+from .costant import SITE_NAME
def search(string_to_search: str = None, get_onylDatabase: bool = False):
@@ -27,7 +28,7 @@ def search(string_to_search: str = None, get_onylDatabase: bool = False):
"""
if string_to_search is None:
- string_to_search = msg.ask("\n[purple]Insert word to search in all site").strip()
+ string_to_search = msg.ask(f"\n[purple]Insert word to search in [red]{SITE_NAME}").strip()
# Get site domain and version and get result of the search
site_version, domain = get_version_and_domain()
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/film.py b/StreamingCommunity/Api/Site/streamingcommunity/film.py
index fd66d1f..a0a798c 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/film.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/film.py
@@ -52,13 +52,9 @@ def download_film(select_title: MediaItem):
mp4_path = os.path.join(ROOT_PATH, SITE_NAME, MOVIE_FOLDER, select_title.slug)
# Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
+ HLS_Downloader(
m3u8_playlist=master_playlist,
output_filename=os.path.join(mp4_path, title_name)
).start()
- if r_proc != None:
- console.print("[green]Result: ")
- console.print(r_proc)
-
return os.path.join(mp4_path, title_name)
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/series.py b/StreamingCommunity/Api/Site/streamingcommunity/series.py
index cd2f9a0..d330dbb 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/series.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/series.py
@@ -40,15 +40,16 @@ def download_video(tv_name: str, index_season_selected: int, index_episode_selec
"""
start_message()
+ tv_name = scrape_serie.season_manager.slug
# Get info about episode
- obj_episode = scrape_serie.obj_episode_manager.episodes[index_episode_selected - 1]
+ obj_episode = scrape_serie.episode_manager.get(index_episode_selected - 1)
console.print(f"[yellow]Download: [red]{index_season_selected}:{index_episode_selected} {obj_episode.name}")
print()
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(tv_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
- mp4_path = os.path.join(ROOT_PATH, SITE_NAME, SERIES_FOLDER, tv_name, f"S{index_season_selected}")
+ mp4_path = os.path.join(ROOT_PATH, SITE_NAME, SERIES_FOLDER, tv_name, f"S{index_season_selected}")
# Retrieve scws and if available master playlist
video_source.get_iframe(obj_episode.id)
@@ -56,14 +57,10 @@ def download_video(tv_name: str, index_season_selected: int, index_episode_selec
master_playlist = video_source.get_playlist()
# Download the episode
- r_proc = HLS_Downloader(
+ HLS_Downloader(
m3u8_playlist=master_playlist,
output_filename=os.path.join(mp4_path, mp4_name)
).start()
-
- if r_proc != None:
- console.print("[green]Result: ")
- console.print(r_proc)
return os.path.join(mp4_path, mp4_name)
@@ -78,13 +75,12 @@ def download_episode(tv_name: str, index_season_selected: int, scrape_serie: Scr
"""
# Clean memory of all episodes and get the number of the season
- scrape_serie.obj_episode_manager.clear()
- season_number = scrape_serie.obj_season_manager.seasons[index_season_selected - 1].number
+ scrape_serie.episode_manager.clear()
# Start message and collect information about episodes
start_message()
- scrape_serie.collect_title_season(season_number)
- episodes_count = scrape_serie.obj_episode_manager.get_length()
+ scrape_serie.collect_info_season(index_season_selected)
+ episodes_count = scrape_serie.episode_manager.length()
if download_all:
@@ -131,8 +127,8 @@ def download_series(select_season: MediaItem, version: str) -> None:
video_source.setup(select_season.id)
# Collect information about seasons
- scrape_serie.collect_info_seasons()
- seasons_count = scrape_serie.obj_season_manager.get_length()
+ scrape_serie.collect_info_title()
+ seasons_count = scrape_serie.season_manager.seasons_count
# Prompt user for season selection and download episodes
console.print(f"\n[green]Seasons found: [red]{seasons_count}")
@@ -182,7 +178,7 @@ def display_episodes_list(scrape_serie) -> str:
table_show_manager.add_column(column_info)
# Populate the table with episodes information
- for i, media in enumerate(scrape_serie.obj_episode_manager.episodes):
+ for i, media in enumerate(scrape_serie.episode_manager.episodes):
table_show_manager.add_tv_show({
'Index': str(media.number),
'Name': media.name,
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/site.py b/StreamingCommunity/Api/Site/streamingcommunity/site.py
index 993c1e6..cd73bd1 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/site.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/site.py
@@ -3,6 +3,7 @@
import sys
import json
import logging
+import secrets
# External libraries
@@ -31,7 +32,7 @@ from .costant import SITE_NAME
# Variable
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
-
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
def get_version(text: str):
@@ -52,7 +53,7 @@ def get_version(text: str):
# Extract version
version = json.loads(soup.find("div", {"id": "app"}).get("data-page"))['version']
- #console.print(f"[cyan]Get version [white]=> [red]{version} \n")
+ console.print(f"[cyan]Get version [white]=> [red]{version} \n")
return version
@@ -74,7 +75,17 @@ def get_version_and_domain():
domain_to_use, base_url = search_domain(SITE_NAME, f"https://{SITE_NAME}")
# Extract version from the response
- version = get_version(httpx.get(base_url, headers={'user-agent': get_headers()}).text)
+ try:
+ version = get_version(httpx.get(
+ url=base_url,
+ headers={
+ 'user-agent': get_headers()
+ },
+ timeout=max_timeout
+ ).text)
+ except:
+ console.print("[green]Auto generate version ...")
+ version = secrets.token_hex(32 // 2)
return version, domain_to_use
@@ -90,10 +101,6 @@ def title_search(title_search: str, domain: str) -> int:
Returns:
int: The number of titles found.
"""
-
- max_timeout = config_manager.get_int("REQUESTS", "timeout")
-
- # Send request to search for titles ( replace à to a and space to "+" )
try:
response = httpx.get(
url=f"https://{SITE_NAME}.{domain}/api/search?q={title_search.replace(' ', '+')}",
@@ -112,6 +119,7 @@ def title_search(title_search: str, domain: str) -> int:
'slug': dict_title.get('slug'),
'name': dict_title.get('name'),
'type': dict_title.get('type'),
+ 'date': dict_title.get('last_air_date'),
'score': dict_title.get('score')
})
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py
index 6b00a93..3dc2fd5 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py
@@ -10,7 +10,7 @@ import httpx
# Internal utilities
from StreamingCommunity.Util.headers import get_headers
from StreamingCommunity.Util._jsonConfig import config_manager
-from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager, EpisodeManager
+from StreamingCommunity.Api.Player.Helper.Vixcloud.util import Season, EpisodeManager
# Variable
@@ -26,7 +26,7 @@ class ScrapeSerie:
site_name (str): Name of the streaming site to scrape from
"""
self.is_series = False
- self.headers = {}
+ self.headers = {'user-agent': get_headers()}
self.base_name = site_name
self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
@@ -46,23 +46,22 @@ class ScrapeSerie:
if series_name is not None:
self.is_series = True
self.series_name = series_name
- self.obj_season_manager: SeasonManager = SeasonManager()
- self.obj_episode_manager: EpisodeManager = EpisodeManager()
-
- # Create headers
- self.headers = {
- 'user-agent': get_headers(),
- 'x-inertia': 'true',
- 'x-inertia-version': self.version,
- }
+ self.season_manager = None
+ self.episode_manager: EpisodeManager = EpisodeManager()
- def collect_info_seasons(self) -> None:
+ def collect_info_title(self) -> None:
"""
Retrieve season information for a TV series from the streaming site.
Raises:
Exception: If there's an error fetching season information
"""
+ self.headers = {
+ 'user-agent': get_headers(),
+ 'x-inertia': 'true',
+ 'x-inertia-version': self.version,
+ }
+
try:
response = httpx.get(
@@ -73,17 +72,22 @@ class ScrapeSerie:
response.raise_for_status()
# Extract seasons from JSON response
- json_response = response.json().get('props', {}).get('title', {}).get('seasons', [])
-
- # Add each season to the season manager
- for dict_season in json_response:
- self.obj_season_manager.add_season(dict_season)
+ json_response = response.json().get('props')
+ # Collect info about season
+ self.season_manager = Season(json_response.get('title'))
+ self.season_manager.collect_images(self.base_name, self.domain)
+
+ # Collect every episode of the loaded season (and its cover image)
+ for i, ep in enumerate(json_response.get('loadedSeason').get('episodes')):
+ self.season_manager.episodes.add(ep)
+ self.season_manager.episodes.get(i).collect_image(self.base_name, self.domain)
+
except Exception as e:
logging.error(f"Error collecting season info: {e}")
raise
- def collect_title_season(self, number_season: int) -> None:
+ def collect_info_season(self, number_season: int) -> None:
"""
Retrieve episode information for a specific season.
@@ -93,6 +97,12 @@ class ScrapeSerie:
Raises:
Exception: If there's an error fetching episode information
"""
+ self.headers = {
+ 'user-agent': get_headers(),
+ 'x-inertia': 'true',
+ 'x-inertia-version': self.version,
+ }
+
try:
response = httpx.get(
url=f'https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}/stagione-{number_season}',
@@ -102,11 +112,11 @@ class ScrapeSerie:
response.raise_for_status()
# Extract episodes from JSON response
- json_response = response.json().get('props', {}).get('loadedSeason', {}).get('episodes', [])
+ json_response = response.json().get('props').get('loadedSeason').get('episodes')
# Add each episode to the episode manager
for dict_episode in json_response:
- self.obj_episode_manager.add_episode(dict_episode)
+ self.episode_manager.add(dict_episode)
except Exception as e:
logging.error(f"Error collecting title season info: {e}")
diff --git a/StreamingCommunity/Api/Template/Util/get_domain.py b/StreamingCommunity/Api/Template/Util/get_domain.py
index 26f920c..5e145e2 100644
--- a/StreamingCommunity/Api/Template/Util/get_domain.py
+++ b/StreamingCommunity/Api/Template/Util/get_domain.py
@@ -49,7 +49,16 @@ def get_final_redirect_url(initial_url, max_timeout):
# Create a client with redirects enabled
try:
- with httpx.Client(follow_redirects=True, timeout=max_timeout, headers={'user-agent': get_headers()}) as client:
+ with httpx.Client(
+ headers={
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+ 'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'User-Agent': get_headers()
+ },
+ follow_redirects=True,
+ timeout=max_timeout
+
+ ) as client:
response = client.get(initial_url)
response.raise_for_status()
@@ -59,7 +68,7 @@ def get_final_redirect_url(initial_url, max_timeout):
return final_url
except Exception as e:
- console.print(f"[cyan]Test url[white]: [red]{initial_url}, [cyan]error[white]: [red]{e}")
+ console.print(f"\n[cyan]Test url[white]: [red]{initial_url}, [cyan]error[white]: [red]{e}")
return None
def search_domain(site_name: str, base_url: str):
@@ -69,7 +78,6 @@ def search_domain(site_name: str, base_url: str):
Parameters:
- site_name (str): The name of the site to search the domain for.
- base_url (str): The base URL to construct complete URLs.
- - follow_redirects (bool): To follow redirect url or not.
Returns:
tuple: The found domain and the complete URL.
@@ -80,47 +88,67 @@ def search_domain(site_name: str, base_url: str):
domain = str(config_manager.get_dict("SITE", site_name)['domain'])
try:
-
# Test the current domain
- response_follow = httpx.get(f"{base_url}.{domain}", headers={'user-agent': get_headers()}, timeout=max_timeout, follow_redirects=True)
- response_follow.raise_for_status()
+ with httpx.Client(
+ headers={
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+ 'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'User-Agent': get_headers()
+ },
+ follow_redirects=True,
+ timeout=max_timeout
+
+ ) as client:
+ response_follow = client.get(f"{base_url}.{domain}")
+ response_follow.raise_for_status()
except Exception as e:
-
query = base_url.split("/")[-1]
- first_url = google_search(query)
- console.print(f"[green]First url from google seach[white]: [red]{first_url}")
+
+ # Perform a Google search with multiple results
+ search_results = list(search(query, num_results=5))
+ #console.print(f"[green]Google search results[white]: {search_results}")
- if first_url:
- final_url = get_final_redirect_url(first_url, max_timeout)
-
- if final_url != None:
- console.print(f"\n[bold yellow]Suggestion:[/bold yellow] [white](Experimental)\n"
- f"[cyan]New final URL[white]: [green]{final_url}")
-
- def extract_domain(url):
- parsed_url = urlparse(url)
- domain = parsed_url.netloc
- return domain.split(".")[-1]
-
- new_domain_extract = extract_domain(str(final_url))
-
- if msg.ask(f"[red]Do you want to auto update config.json - '[green]{site_name}[red]' with domain: [green]{new_domain_extract}", choices=["y", "n"], default="y").lower() == "y":
-
- # Update domain in config.json
- config_manager.config['SITE'][site_name]['domain'] = new_domain_extract
- config_manager.write_config()
-
- # Return config domain
- #console.print(f"[cyan]Return domain: [red]{new_domain_extract} \n")
- return new_domain_extract, f"{base_url}.{new_domain_extract}"
+ # Iterate through search results
+ for first_url in search_results:
+ console.print(f"[green]Checking url[white]: [red]{first_url}")
- else:
- console.print("[bold red]\nManually change the domain in the JSON file.[/bold red]")
- raise
+ # Check if the base URL matches the Google search result
+ parsed_first_url = urlparse(first_url)
- else:
- console.print("[bold red]No valid URL to follow redirects.[/bold red]")
+ # NOTE(review): compares the first netloc label to the full base_url (which includes the scheme) — this is rarely true, and the "does not match" message below contradicts the equality test; confirm intent
+ if parsed_first_url.netloc.split(".")[0] == base_url:
+ console.print(f"[red]URL does not match base URL. Skipping.[/red]")
+ continue
+
+ try:
+ final_url = get_final_redirect_url(first_url, max_timeout)
+
+ if final_url is not None:
+
+ def extract_domain(url):
+ parsed_url = urlparse(url)
+ domain = parsed_url.netloc
+ return domain.split(".")[-1]
+
+ new_domain_extract = extract_domain(str(final_url))
+
+ if msg.ask(f"[cyan]\nDo you want to auto site[white]: [red]{site_name}[cyan] with domain[white]: [red]{new_domain_extract}", choices=["y", "n"], default="y").lower() == "y":
+
+ # Update domain in config.json
+ config_manager.config['SITE'][site_name]['domain'] = new_domain_extract
+ config_manager.write_config()
+
+ # Return config domain
+ return new_domain_extract, f"{base_url}.{new_domain_extract}"
+
+ except Exception as redirect_error:
+ console.print(f"[red]Error following redirect for {first_url}: {redirect_error}")
+ continue
+
+ # If no matching URL is found
+ console.print("[bold red]No valid URL found matching the base URL.[/bold red]")
+ raise Exception("No matching domain found")
# Ensure the URL is in string format before parsing
parsed_url = urlparse(str(response_follow.url))
@@ -128,10 +156,9 @@ def search_domain(site_name: str, base_url: str):
tld = parse_domain.split('.')[-1]
if tld is not None:
-
# Update domain in config.json
config_manager.config['SITE'][site_name]['domain'] = tld
config_manager.write_config()
# Return config domain
- return tld, f"{base_url}.{tld}"
+ return tld, f"{base_url}.{tld}"
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Template/Util/manage_ep.py b/StreamingCommunity/Api/Template/Util/manage_ep.py
index 5fc3fb3..b138b04 100644
--- a/StreamingCommunity/Api/Template/Util/manage_ep.py
+++ b/StreamingCommunity/Api/Template/Util/manage_ep.py
@@ -117,16 +117,29 @@ def validate_selection(list_season_select: List[int], seasons_count: int) -> Lis
Returns:
- List[int]: Adjusted list of valid season numbers.
"""
+ while True:
+ try:
+
+ # Remove any seasons greater than the available seasons
+ valid_seasons = [season for season in list_season_select if 1 <= season <= seasons_count]
- # Remove any seasons greater than the available seasons
- valid_seasons = [season for season in list_season_select if 1 <= season <= seasons_count]
+ # If the list is empty, the input was completely invalid
+ if not valid_seasons:
+ logging.error(f"Invalid selection: The selected seasons are outside the available range (1-{seasons_count}). Please try again.")
- # If the list is empty, the input was completely invalid
- if not valid_seasons:
- print()
- raise ValueError(f"Invalid selection: The selected seasons are outside the available range (1-{seasons_count}).")
+ # Re-prompt for valid input
+ input_seasons = input(f"Enter valid season numbers (1-{seasons_count}): ")
+ list_season_select = list(map(int, input_seasons.split(',')))
+ continue # Re-prompt the user if the selection is invalid
+
+ return valid_seasons # Return the valid seasons if the input is correct
+
+ except ValueError:
+ logging.error("Error: Please enter valid integers separated by commas.")
- return valid_seasons
+ # Prompt the user for valid input again
+ input_seasons = input(f"Enter valid season numbers (1-{seasons_count}): ")
+ list_season_select = list(map(int, input_seasons.split(',')))
# --> for episode
@@ -141,13 +154,26 @@ def validate_episode_selection(list_episode_select: List[int], episodes_count: i
Returns:
- List[int]: Adjusted list of valid episode numbers.
"""
+ while True:
+ try:
- # Remove any episodes greater than the available episodes
- valid_episodes = [episode for episode in list_episode_select if 1 <= episode <= episodes_count]
+ # Remove any episodes greater than the available episodes
+ valid_episodes = [episode for episode in list_episode_select if 1 <= episode <= episodes_count]
- # If the list is empty, the input was completely invalid
- if not valid_episodes:
- print()
- raise ValueError(f"Invalid selection: The selected episodes are outside the available range (1-{episodes_count}).")
+ # If the list is empty, the input was completely invalid
+ if not valid_episodes:
+ logging.error(f"Invalid selection: The selected episodes are outside the available range (1-{episodes_count}). Please try again.")
- return valid_episodes
+ # Re-prompt for valid input
+ input_episodes = input(f"Enter valid episode numbers (1-{episodes_count}): ")
+ list_episode_select = list(map(int, input_episodes.split(',')))
+ continue # Re-prompt the user if the selection is invalid
+
+ return valid_episodes
+
+ except ValueError:
+ logging.error("Error: Please enter valid integers separated by commas.")
+
+ # Prompt the user for valid input again
+ input_episodes = input(f"Enter valid episode numbers (1-{episodes_count}): ")
+ list_episode_select = list(map(int, input_episodes.split(',')))
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/Downloader/HLS/segments.py b/StreamingCommunity/Lib/Downloader/HLS/segments.py
index 338f4dc..d04e6b4 100644
--- a/StreamingCommunity/Lib/Downloader/HLS/segments.py
+++ b/StreamingCommunity/Lib/Downloader/HLS/segments.py
@@ -229,11 +229,6 @@ class M3U8_Segments:
self.download_interrupted = True
self.stop_event.set()
- if threading.current_thread() is threading.main_thread():
- signal.signal(signal.SIGINT, interrupt_handler)
- else:
- print("Signal handler must be set in the main thread")
-
def make_requests_stream(self, ts_url: str, index: int, progress_bar: tqdm, backoff_factor: float = 1.5) -> None:
"""
Downloads a TS segment and adds it to the segment queue with retry logic.
@@ -548,7 +543,7 @@ class M3U8_Segments:
file_size = os.path.getsize(self.tmp_file_path)
if file_size == 0:
raise Exception("Output file is empty")
-
+
# Get expected time
ex_hours, ex_minutes, ex_seconds = format_duration(self.expected_real_time_s)
ex_formatted_duration = f"[yellow]{int(ex_hours)}[red]h [yellow]{int(ex_minutes)}[red]m [yellow]{int(ex_seconds)}[red]s"
diff --git a/StreamingCommunity/Lib/Driver/driver_1.py b/StreamingCommunity/Lib/Driver/driver_1.py
new file mode 100644
index 0000000..5a17a24
--- /dev/null
+++ b/StreamingCommunity/Lib/Driver/driver_1.py
@@ -0,0 +1,76 @@
+# 29.06.24
+
+import tempfile
+import logging
+
+
+# External library
+from bs4 import BeautifulSoup
+from seleniumbase import Driver
+
+
+# Internal utilities
+from StreamingCommunity.Util._jsonConfig import config_manager
+
+
+# Config
+USE_HEADLESS = config_manager.get_bool("BROWSER", "headless")
+
+
+class WebAutomation:
+ """
+ A class for automating web interactions using SeleniumBase and BeautifulSoup.
+ """
+
+ def __init__(self):
+ """
+ Initializes the WebAutomation instance with SeleniumBase Driver.
+
+ Parameters:
+ headless (bool, optional): Whether to run the browser in headless mode. Default is True.
+ """
+ logging.getLogger('seleniumbase').setLevel(logging.ERROR)
+
+ self.driver = Driver(
+ uc=True,
+ uc_cdp_events=True,
+ headless=USE_HEADLESS,
+ user_data_dir = tempfile.mkdtemp(),
+ chromium_arg="--disable-search-engine-choice-screen"
+ )
+
+ def quit(self):
+ """
+ Quits the WebDriver instance.
+ """
+ self.driver.quit()
+
+ def get_page(self, url):
+ """
+ Navigates the browser to the specified URL.
+
+ Parameters:
+ url (str): The URL to navigate to.
+ """
+ self.driver.get(url)
+
+ def retrieve_soup(self):
+ """
+ Retrieves the BeautifulSoup object for the current page's HTML content.
+
+ Returns:
+ BeautifulSoup object: Parsed HTML content of the current page.
+ """
+ html_content = self.driver.page_source
+ soup = BeautifulSoup(html_content, 'html.parser')
+ return soup
+
+ def get_content(self):
+ """
+ Returns the HTML content of the current page.
+
+ Returns:
+ str: The HTML content of the current page.
+ """
+ return self.driver.page_source
+
diff --git a/StreamingCommunity/Lib/FFmpeg/util.py b/StreamingCommunity/Lib/FFmpeg/util.py
index 7c7828e..c8512ab 100644
--- a/StreamingCommunity/Lib/FFmpeg/util.py
+++ b/StreamingCommunity/Lib/FFmpeg/util.py
@@ -55,7 +55,6 @@ def get_video_duration(file_path: str) -> float:
Returns:
(float): The duration of the video in seconds if successful, None if there's an error.
"""
-
try:
ffprobe_cmd = [FFPROB_PATH, '-v', 'error', '-show_format', '-print_format', 'json', file_path]
logging.info(f"FFmpeg command: {ffprobe_cmd}")
diff --git a/StreamingCommunity/Util/table.py b/StreamingCommunity/Util/table.py
index f84442e..bfc6553 100644
--- a/StreamingCommunity/Util/table.py
+++ b/StreamingCommunity/Util/table.py
@@ -1,5 +1,12 @@
# 03.03.24
+import os
+import sys
+import logging
+import importlib
+
+
+# External library
from rich.console import Console
from rich.table import Table
from rich.prompt import Prompt
@@ -9,15 +16,13 @@ from typing import Dict, List, Any
# Internal utilities
from .message import start_message
+from .call_stack import get_call_stack
class TVShowManager:
def __init__(self):
"""
Initialize TVShowManager with provided column information.
-
- Parameters:
- - column_info (Dict[str, Dict[str, str]]): Dictionary containing column names, their colors, and justification.
"""
self.console = Console()
self.tv_shows: List[Dict[str, Any]] = [] # List to store TV show data as dictionaries
@@ -80,7 +85,6 @@ class TVShowManager:
self.console.print(table) # Use self.console.print instead of print
-
def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
"""
Run the TV show manager application.
@@ -101,9 +105,16 @@ class TVShowManager:
# Display table
self.display_data(self.tv_shows[self.slice_start:self.slice_end])
+ # Find research function from call stack
+ research_func = None
+ for reverse_fun in get_call_stack():
+ if reverse_fun['function'] == 'search' and reverse_fun['script'] == '__init__.py':
+ research_func = reverse_fun
+ logging.info(f"Found research_func: {research_func}")
+
# Handling user input for loading more items or quitting
if self.slice_end < total_items:
- self.console.print(f"\n\n[yellow][INFO] [green]Press [red]Enter [green]for next page, or [red]'q' [green]to quit.")
+ self.console.print(f"\n\n[yellow][INFO] [green]Press [red]Enter [green]for next page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
if not force_int_input:
key = Prompt.ask(
@@ -113,7 +124,7 @@ class TVShowManager:
else:
choices = [str(i) for i in range(0, max_int_input)]
- choices.extend(["q", ""])
+ choices.extend(["q", "", "back"])
key = Prompt.ask("[cyan]Insert media [red]index", choices=choices, show_choices=False)
last_command = key
@@ -127,22 +138,62 @@ class TVShowManager:
if self.slice_end > total_items:
self.slice_end = total_items
+ elif key.lower() == "back" and research_func:
+ try:
+ # Find the project root directory
+ current_path = research_func['folder']
+ while not os.path.exists(os.path.join(current_path, 'StreamingCommunity')):
+ current_path = os.path.dirname(current_path)
+
+ # Add project root to Python path
+ project_root = current_path
+ #print(f"[DEBUG] Project Root: {project_root}")
+
+ if project_root not in sys.path:
+ sys.path.insert(0, project_root)
+
+ # Import using full absolute import
+ module_path = 'StreamingCommunity.Api.Site.streamingcommunity'
+ #print(f"[DEBUG] Importing module: {module_path}")
+
+ # Import the module
+ module = importlib.import_module(module_path)
+
+ # Get the search function
+ search_func = getattr(module, 'media_search_manager')
+
+ # Ask for search string
+ string_to_search = Prompt.ask(f"\n[purple]Insert word to search in [red]{research_func['folder_base']}").strip()
+
+ # Call the search function with the search string
+ search_func(string_to_search)
+
+ except Exception as e:
+ self.console.print(f"[red]Error during search: {e}")
+
+ # Print detailed traceback
+ import traceback
+ traceback.print_exc()
+
+ # Optionally remove the path if you want to clean up
+ if project_root in sys.path:
+ sys.path.remove(project_root)
+
else:
break
else:
# Last slice, ensure all remaining items are shown
- self.console.print(f"\n\n[yellow][INFO] [red]You've reached the end. [green]Press [red]Enter [green]for next page, or [red]'q' [green]to quit.")
+ self.console.print(f"\n\n[yellow][INFO] [green]You've reached the end. [red]Enter [green]for first page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
if not force_int_input:
key = Prompt.ask(
"\n[cyan]Insert media index [yellow](e.g., 1), [red]* [cyan]to download all media, "
"[yellow](e.g., 1-2) [cyan]for a range of media, or [yellow](e.g., 3-*) [cyan]to download from a specific index to the end"
)
-
else:
choices = [str(i) for i in range(0, max_int_input)]
- choices.extend(["q", ""])
+ choices.extend(["q", "", "back"])
key = Prompt.ask("[cyan]Insert media [red]index", choices=choices, show_choices=False)
last_command = key
@@ -154,10 +205,51 @@ class TVShowManager:
self.slice_start = 0
self.slice_end = self.step
+ elif key.lower() == "back" and research_func:
+ try:
+ # Find the project root directory
+ current_path = research_func['folder']
+ while not os.path.exists(os.path.join(current_path, 'StreamingCommunity')):
+ current_path = os.path.dirname(current_path)
+
+ # Add project root to Python path
+ project_root = current_path
+ #print(f"[DEBUG] Project Root: {project_root}")
+
+ if project_root not in sys.path:
+ sys.path.insert(0, project_root)
+
+ # Import using full absolute import
+ module_path = 'StreamingCommunity.Api.Site.streamingcommunity'
+ #print(f"[DEBUG] Importing module: {module_path}")
+
+ # Import the module
+ module = importlib.import_module(module_path)
+
+ # Get the search function
+ search_func = getattr(module, 'media_search_manager')
+
+ # Ask for search string
+ string_to_search = Prompt.ask(f"\n[purple]Insert word to search in [red]{research_func['folder_base']}").strip()
+
+ # Call the search function with the search string
+ search_func(string_to_search)
+
+ except Exception as e:
+ self.console.print(f"[red]Error during search: {e}")
+
+ # Print detailed traceback
+ import traceback
+ traceback.print_exc()
+
+ # Optionally remove the path if you want to clean up
+ if project_root in sys.path:
+ sys.path.remove(project_root)
+
else:
break
return last_command
def clear(self):
- self.tv_shows = []
+ self.tv_shows = []
\ No newline at end of file
diff --git a/client/dashboard/src/components/Dashboard.js b/client/dashboard/src/components/Dashboard.js
index cb0fa55..2b7e9fc 100644
--- a/client/dashboard/src/components/Dashboard.js
+++ b/client/dashboard/src/components/Dashboard.js
@@ -3,8 +3,7 @@ import axios from 'axios';
import { Container, Button, Form, InputGroup } from 'react-bootstrap';
import SearchBar from './SearchBar.js';
-
-const API_BASE_URL = "http://127.0.0.1:1234";
+import { API_URL } from './ApiUrl.js';
const Dashboard = () => {
const [items, setItems] = useState([]);
@@ -15,7 +14,7 @@ const Dashboard = () => {
const fetchItems = async (filter = '') => {
try {
- const response = await axios.get(`${API_BASE_URL}/api/items?filter=${filter}`);
+ const response = await axios.get(`${API_URL}/items?filter=${filter}`);
setItems(response.data);
} catch (error) {
console.error("Error fetching items:", error);
diff --git a/client/dashboard/src/components/Downloads.js b/client/dashboard/src/components/Downloads.js
index 54d98bc..bd9716e 100644
--- a/client/dashboard/src/components/Downloads.js
+++ b/client/dashboard/src/components/Downloads.js
@@ -4,7 +4,7 @@ import { Container, Row, Col, Card, Button, Badge, Modal } from 'react-bootstrap
import { FaTrash, FaPlay } from 'react-icons/fa';
import { Link } from 'react-router-dom';
-const API_BASE_URL = "http://127.0.0.1:1234";
+import { SERVER_PATH_URL, SERVER_DELETE_URL, API_URL } from './ApiUrl';
const Downloads = () => {
const [downloads, setDownloads] = useState([]);
@@ -15,7 +15,7 @@ const Downloads = () => {
// Fetch all downloads
const fetchDownloads = async () => {
try {
- const response = await axios.get(`${API_BASE_URL}/downloads`);
+ const response = await axios.get(`${SERVER_PATH_URL}/get`);
setDownloads(response.data);
setLoading(false);
} catch (error) {
@@ -27,7 +27,7 @@ const Downloads = () => {
// Delete a TV episode
const handleDeleteEpisode = async (id, season, episode) => {
try {
- await axios.delete(`${API_BASE_URL}/deleteEpisode`, {
+ await axios.delete(`${SERVER_DELETE_URL}/episode`, {
params: { id, season, episode }
});
fetchDownloads(); // Refresh the list
@@ -39,7 +39,7 @@ const Downloads = () => {
// Delete a movie
const handleDeleteMovie = async (id) => {
try {
- await axios.delete(`${API_BASE_URL}/deleteMovie`, {
+ await axios.delete(`${SERVER_DELETE_URL}/movie`, {
params: { id }
});
fetchDownloads(); // Refresh the list
@@ -50,13 +50,16 @@ const Downloads = () => {
// Watch video
const handleWatchVideo = (videoPath) => {
+        console.log("Video path received:", videoPath); // Inspect the videoPath value passed to the player
setCurrentVideo(videoPath);
setShowPlayer(true);
};
+
// Initial fetch of downloads
useEffect(() => {
fetchDownloads();
+ console.log("Downloads fetched:", downloads);
}, []);
if (loading) {
@@ -107,7 +110,7 @@ const Downloads = () => {
@@ -180,12 +183,12 @@ const Downloads = () => {
{/* Modal Video Player */}
setShowPlayer(false)} size="lg" centered>
-
+
diff --git a/client/dashboard/src/components/SearchBar.js b/client/dashboard/src/components/SearchBar.js
index ef414a8..94da62a 100644
--- a/client/dashboard/src/components/SearchBar.js
+++ b/client/dashboard/src/components/SearchBar.js
@@ -1,5 +1,5 @@
import React, { useState } from 'react';
-import PropTypes from 'prop-types'; // Add this import
+import PropTypes from 'prop-types';
import { useNavigate } from 'react-router-dom';
import { Form, InputGroup, Button } from 'react-bootstrap';
import { FaSearch } from 'react-icons/fa';
@@ -38,11 +38,8 @@ const SearchBar = ({ onSearch }) => {
);
};
-// Add PropTypes validation
SearchBar.propTypes = {
- onSearch: PropTypes.func // If onSearch is optional
- // or
- // onSearch: PropTypes.func.isRequired // If onSearch is required
+ onSearch: PropTypes.func
};
export default SearchBar;
\ No newline at end of file
diff --git a/client/dashboard/src/components/SearchResult.js b/client/dashboard/src/components/SearchResult.js
index 9fb8642..0c64a77 100644
--- a/client/dashboard/src/components/SearchResult.js
+++ b/client/dashboard/src/components/SearchResult.js
@@ -4,8 +4,7 @@ import axios from 'axios';
import { Container, Row, Col, Card, Spinner } from 'react-bootstrap';
import SearchBar from './SearchBar.js';
-
-const API_BASE_URL = "http://127.0.0.1:1234";
+import { API_URL } from './ApiUrl.js';
const SearchResults = () => {
const [results, setResults] = useState([]);
@@ -20,7 +19,7 @@ const SearchResults = () => {
const fetchSearchResults = async () => {
try {
setLoading(true);
- const response = await axios.get(`${API_BASE_URL}/api/search`, {
+ const response = await axios.get(`${API_URL}/search`, {
params: { q: query }
});
setResults(response.data);
diff --git a/client/dashboard/src/components/TitleDetail.js b/client/dashboard/src/components/TitleDetail.js
index 8dd814c..91ca59a 100644
--- a/client/dashboard/src/components/TitleDetail.js
+++ b/client/dashboard/src/components/TitleDetail.js
@@ -6,7 +6,7 @@ import { FaDownload, FaPlay, FaPlus, FaTrash } from 'react-icons/fa';
import SearchBar from './SearchBar.js';
-const API_BASE_URL = "http://127.0.0.1:1234";
+import { API_URL, SERVER_WATCHLIST_URL, SERVER_PATH_URL } from './ApiUrl.js';
const TitleDetail = () => {
const [titleDetails, setTitleDetails] = useState(null);
@@ -27,7 +27,7 @@ const TitleDetail = () => {
const titleUrl = location.state?.url || location.pathname.split('/title/')[1];
// Fetch title information
- const response = await axios.get(`${API_BASE_URL}/api/getInfo`, {
+ const response = await axios.get(`${API_URL}/getInfo`, {
params: { url: titleUrl }
});
@@ -59,7 +59,7 @@ const TitleDetail = () => {
const checkDownloadStatus = async (titleData) => {
try {
if (titleData.type === 'movie') {
- const response = await axios.get(`${API_BASE_URL}/downloads`);
+ const response = await axios.get(`${SERVER_PATH_URL}/get`);
const downloadedMovie = response.data.find(
download => download.type === 'movie' && download.slug === titleData.slug
);
@@ -70,7 +70,7 @@ const TitleDetail = () => {
}
});
} else if (titleData.type === 'tv') {
- const response = await axios.get(`${API_BASE_URL}/downloads`);
+ const response = await axios.get(`${SERVER_PATH_URL}/get`);
const downloadedEpisodes = response.data.filter(
download => download.type === 'tv' && download.slug === titleData.slug
);
@@ -92,7 +92,7 @@ const TitleDetail = () => {
// Check watchlist status
const checkWatchlistStatus = async (slug) => {
try {
- const response = await axios.get(`${API_BASE_URL}/api/getWatchlist`);
+ const response = await axios.get(`${SERVER_WATCHLIST_URL}/get`);
const inWatchlist = response.data.some(item => item.name === slug);
setIsInWatchlist(inWatchlist);
} catch (error) {
@@ -104,7 +104,7 @@ const TitleDetail = () => {
if (titleDetails.type === 'tv') {
try {
setLoading(true);
- const seasonResponse = await axios.get(`${API_BASE_URL}/api/getInfoSeason`, {
+ const seasonResponse = await axios.get(`${API_URL}/getInfoSeason`, {
params: {
url: location.state?.url,
n: seasonNumber
@@ -123,7 +123,7 @@ const TitleDetail = () => {
const handleDownloadFilm = async () => {
try {
- const response = await axios.get(`${API_BASE_URL}/downloadFilm`, {
+ const response = await axios.get(`${API_URL}/download/film`, {
params: {
id: titleDetails.id,
slug: titleDetails.slug
@@ -144,12 +144,14 @@ const TitleDetail = () => {
}
};
- const handleDownloadEpisode = async (seasonNumber, episodeNumber) => {
+ const handleDownloadEpisode = async (seasonNumber, episodeNumber, titleID, titleSlug) => {
try {
- const response = await axios.get(`${API_BASE_URL}/downloadEpisode`, {
+ const response = await axios.get(`${API_URL}/download/episode`, {
params: {
n_s: seasonNumber,
- n_ep: episodeNumber
+ n_ep: episodeNumber,
+ titleID: titleID,
+ slug: titleSlug
}
});
const videoPath = response.data.path;
@@ -176,7 +178,7 @@ const TitleDetail = () => {
try {
let path;
if (titleDetails.type === 'movie') {
- const response = await axios.get(`${API_BASE_URL}/moviePath`, {
+ const response = await axios.get(`${SERVER_PATH_URL}/movie`, {
params: { id: titleDetails.id }
});
path = response.data.path;
@@ -198,21 +200,21 @@ const TitleDetail = () => {
const handleAddToWatchlist = async () => {
try {
- await axios.post(`${API_BASE_URL}/api/addWatchlist`, {
+ await axios.post(`${SERVER_WATCHLIST_URL}/add`, {
name: titleDetails.slug,
url: location.state?.url || location.pathname.split('/title/')[1],
- season: titleDetails.season_count
+            season: titleDetails.season_count // key 'season' carries the season_count value expected by the API
});
setIsInWatchlist(true);
} catch (error) {
console.error("Error adding to watchlist:", error);
alert("Error adding to watchlist. Please try again.");
}
- };
-
+ };
+
const handleRemoveFromWatchlist = async () => {
try {
- await axios.post(`${API_BASE_URL}/api/removeWatchlist`, {
+ await axios.post(`${SERVER_WATCHLIST_URL}/remove`, {
name: titleDetails.slug
});
setIsInWatchlist(false);
@@ -375,7 +377,7 @@ const TitleDetail = () => {
) : (
@@ -393,7 +395,7 @@ const TitleDetail = () => {
setShowPlayer(false)} size="lg" centered>