core: Temporary fix "check_duration_v_a()"

This commit is contained in:
None 2025-03-21 14:48:17 +01:00
parent 23b576e4b6
commit b71fd50c24
7 changed files with 430 additions and 0 deletions

View File

@@ -0,0 +1,49 @@
# 21.03.25
import logging

# External libraries
import httpx

# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent

# Variable
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")


class AnimeWorldPlayer:
    def __init__(self, full_url, episode_data, session_id, csrf_token):
        """Initialize the AnimeWorldPlayer with session details, episode data, and URL."""
        self.session_id = session_id
        self.csrf_token = csrf_token
        self.episode_data = episode_data
        self.number = episode_data['number']
        self.link = episode_data['link']

        # Create an HTTP client with session cookies, headers, and base URL.
        self.client = httpx.Client(
            cookies={"sessionId": session_id},
            headers={"User-Agent": get_userAgent(), "csrf-token": csrf_token},
            base_url=full_url,
            timeout=MAX_TIMEOUT
        )

    def get_download_link(self):
        """Fetch the download link from AnimeWorld using the episode link."""
        try:
            # Make a POST request to the episode link and follow any redirects
            res = self.client.post(self.link, follow_redirects=True)
            data = res.json()

            # Extract the first available server link and return it after modifying the URL
            server_link = data["links"]["9"][list(data["links"]["9"].keys())[0]]["link"]
            return server_link.replace('download-file.php?id=', '')

        except Exception as e:
            logging.error(f"Error in new API system: {e}")
            return None
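
For context, a minimal usage sketch of this player; the URL, session ID and token values below are placeholders, and the episode dict mirrors what ScrapSerie.get_episodes() builds further down.

# Hypothetical usage, all values are placeholders
episode = {'number': '1', 'link': '/api/download/12345'}
player = AnimeWorldPlayer("https://animeworld.example", episode, "session-id", "csrf-token")
mp4_url = player.get_download_link()
print(mp4_url)  # direct mp4 URL, or None if the API call failed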

View File

@@ -0,0 +1,71 @@
# 21.03.25

# External library
from rich.console import Console
from rich.prompt import Prompt

# Internal utilities
from StreamingCommunity.Api.Template import get_select_title
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem

# Logic class
from .site import title_search, media_search_manager, table_show_manager
from .serie import download_series

# Variable
indice = 8
_useFor = "anime"
_deprecate = True
_priority = 2
_engineDownload = "mp4"

msg = Prompt()
console = Console()


def process_search_result(select_title):
    """
    Handles the search result and initiates the download for either a film or series.
    """
    if select_title.type == "TV":
        download_series(select_title)


def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None):
    """
    Main function of the application to search for films, series, and anime.

    Parameters:
        string_to_search (str, optional): String to search for
        get_onlyDatabase (bool, optional): If True, return only the database object
        direct_item (dict, optional): Direct item to process (bypass search)
    """
    if direct_item:
        select_title = MediaItem(**direct_item)
        process_search_result(select_title)
        return

    # Get the user input for the search term if none was provided
    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()

    # Perform the database search
    len_database = title_search(string_to_search)

    # If only the database is needed, return the manager
    if get_onlyDatabase:
        return media_search_manager

    if len_database > 0:
        select_title = get_select_title(table_show_manager, media_search_manager)
        process_search_result(select_title)

    else:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")

        # If no results are found, ask again
        search()
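
As a sketch, the direct_item branch can be exercised without the interactive prompt; the payload below is hypothetical and only needs to satisfy the MediaItem model.

# Hypothetical direct_item payload; keys mirror what title_search() stores via add_media()
item = {
    'id': 1,
    'name': 'Example Title',
    'type': 'TV',
    'url': 'https://animeworld.example/play/example-title.Ab1Cd'
}
search(direct_item=item)  # skips the prompt and hands the MediaItem to download_series()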

View File

@@ -0,0 +1,108 @@
# 11.03.24
import os
import logging
from typing import Tuple

# External library
from rich.console import Console
from rich.prompt import Prompt

# Internal utilities
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Lib.Downloader import MP4_downloader

# Logic class
from .util.ScrapeSerie import ScrapSerie
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Util import manage_selection, dynamic_format_number
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem

# Player
from StreamingCommunity.Api.Player.sweetpixel import AnimeWorldPlayer

# Variable
console = Console()
msg = Prompt()
KILL_HANDLER = False


def download_episode(index_select: int, scrape_serie: ScrapSerie, episodes) -> Tuple[str, bool]:
    """
    Downloads the selected episode.

    Parameters:
        - index_select (int): Index of the episode to download.
        - scrape_serie (ScrapSerie): Scraper instance for the selected series.
        - episodes (list): List of episode data dictionaries.

    Return:
        - str: output path
        - bool: kill handler status
    """
    start_message()

    # Get information about the selected episode
    console.print(f"[yellow]Download: [red]EP_{index_select} \n")
    console.print("[cyan]You can safely stop the download with [bold]Ctrl+c[bold] [cyan] \n")

    # Create output path
    title_name = f"{scrape_serie.get_name()}_EP_{dynamic_format_number(str(index_select))}.mp4"
    mp4_path = os_manager.get_sanitize_path(os.path.join(site_constant.ANIME_FOLDER, scrape_serie.get_name()))

    # Create output folder
    os_manager.create_path(mp4_path)

    # Collect mp4 link
    video_source = AnimeWorldPlayer(site_constant.FULL_URL, episodes[index_select], scrape_serie.session_id, scrape_serie.csrf_token)
    mp4_link = video_source.get_download_link()

    # Start downloading
    path, kill_handler = MP4_downloader(
        url=str(mp4_link).strip(),
        path=os.path.join(mp4_path, title_name)
    )

    return path, kill_handler


def download_series(select_title: MediaItem):
    """
    Function to download episodes of a TV series.

    Parameters:
        - select_title (MediaItem): The selected series to download.
    """
    start_message()
    scrape_serie = ScrapSerie(select_title.url, site_constant.FULL_URL)

    # Get the count of episodes for the TV series
    episodes = scrape_serie.get_episodes()
    episodes_count = len(episodes)
    console.print(f"[cyan]Episodes found: [red]{episodes_count}")

    # Prompt user to select an episode index
    last_command = msg.ask("\n[cyan]Insert media [red]index [yellow]or [red]* [cyan]to download all media [yellow]or [red]1-2 [cyan]or [red]3-* [cyan]for a range of media")

    # Manage user selection
    list_episode_select = manage_selection(last_command, episodes_count)

    # Download the single selected episode
    if len(list_episode_select) == 1 and last_command != "*":
        path, _ = download_episode(list_episode_select[0] - 1, scrape_serie, episodes)
        return path

    # Download all the other selected episodes
    else:
        kill_handler = False
        for i_episode in list_episode_select:
            if kill_handler:
                break
            _, kill_handler = download_episode(i_episode - 1, scrape_serie, episodes)
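
A minimal stubbed sketch of the selection loop above, assuming manage_selection returns 1-based episode numbers (which the "- 1" offsets imply); the downloader here is a fake stand-in.

# Stubbed illustration of the download loop (assumed 1-based numbers from manage_selection)
def fake_download(index):
    print(f"downloading episodes[{index}]")
    return "out.mp4", False  # path, kill_handler

kill_handler = False
for i_episode in [1, 2, 3]:
    if kill_handler:
        break
    _, kill_handler = fake_download(i_episode - 1)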

View File

@@ -0,0 +1,102 @@
# 21.03.25
import logging

# External libraries
import httpx
from bs4 import BeautifulSoup
from rich.console import Console

# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent, get_headers
from StreamingCommunity.Util.table import TVShowManager

# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager

# Variable
console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")


def get_session_and_csrf() -> tuple:
    """
    Get the session ID and CSRF token from the website's cookies and HTML meta data.
    """
    # Send an initial GET request to the website
    response = httpx.get(site_constant.FULL_URL, headers=get_headers(), timeout=max_timeout)

    # Extract the sessionId from the cookies
    session_id = response.cookies.get('sessionId')
    logging.info(f"Session ID: {session_id}")

    # Use BeautifulSoup to parse the HTML and extract the CSRF-Token
    soup = BeautifulSoup(response.text, 'html.parser')

    # Try to find the CSRF token in a meta tag or hidden input
    csrf_token = None
    meta_tag = soup.find('meta', {'name': 'csrf-token'})
    if meta_tag:
        csrf_token = meta_tag.get('content')

    # If it's not in the meta tag, check for hidden input fields
    if not csrf_token:
        input_tag = soup.find('input', {'name': '_csrf'})
        if input_tag:
            csrf_token = input_tag.get('value')

    logging.info(f"CSRF Token: {csrf_token}")
    return session_id, csrf_token


def title_search(title: str) -> int:
    """
    Function to perform an anime search using a provided title.

    Parameters:
        - title (str): The title to search for.

    Returns:
        - int: The number of titles collected in the media search manager.
    """
    session_id, csrf_token = get_session_and_csrf()
    url = f"{site_constant.FULL_URL}/api/search/v2"

    # Set up the headers and params for the request
    headers = {
        'User-Agent': get_userAgent(),
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'CSRF-Token': csrf_token,
        'X-Requested-With': 'XMLHttpRequest'
    }
    params = {
        'keyword': title,
    }

    # Make the POST request
    response = httpx.post(url, params=params, cookies={'sessionId': session_id}, headers=headers, timeout=max_timeout)

    for dict_title in response.json()['animes']:
        try:
            media_search_manager.add_media({
                'id': dict_title.get('id'),
                'name': dict_title.get('name'),
                'type': 'TV',
                'status': dict_title.get('stateName'),
                'episodes_count': dict_title.get('episodes'),
                'plot': ' '.join((words := str(dict_title.get('story', '')).split())[:10]) + ('...' if len(words) > 10 else ''),
                'url': f"{site_constant.FULL_URL}/play/{dict_title.get('link')}.{dict_title.get('identifier')}"
            })

        except Exception as e:
            print(f"Error parsing an anime entry: {e}")

    # Return the length of media search manager
    return media_search_manager.get_length()
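
For reference, a sketch of the entry fields this parser reads from response.json()['animes']; the field names come from the .get() calls above, while the values are made up.

# Illustrative entry only; real values come from the /api/search/v2 response
example_entry = {
    'id': 123,
    'name': 'Example Anime',
    'stateName': 'Finished',
    'episodes': 12,
    'story': 'A long synopsis that the parser truncates to its first ten words before storing it',
    'link': 'example-anime',
    'identifier': 'Ab1Cd'
}
# Resulting play URL: f"{site_constant.FULL_URL}/play/example-anime.Ab1Cd"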

View File

@@ -0,0 +1,78 @@
# 21.03.25

# External libraries
import httpx
from bs4 import BeautifulSoup

# Internal utilities
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.config_json import config_manager

# Player
from ..site import get_session_and_csrf
from StreamingCommunity.Api.Player.sweetpixel import AnimeWorldPlayer

# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")


class ScrapSerie:
    def __init__(self, url, full_url):
        """Initialize the ScrapSerie object with the provided URL and set up the HTTP client."""
        self.url = url
        self.full_url = full_url
        self.link = httpx.URL(url).path
        self.session_id, self.csrf_token = get_session_and_csrf()

        self.client = httpx.Client(
            cookies={"sessionId": self.session_id},
            headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
            base_url=full_url,
            timeout=max_timeout
        )

    def get_name(self):
        """Extract and return the name of the anime series."""
        response = self.client.get(self.link, follow_redirects=True)
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, "html.parser")
            return soup.find("h1", {"id": "anime-title"}).get_text()

        else:
            raise Exception(f"Failed to retrieve anime name. Status code: {response.status_code}")

    def get_episodes(self, nums=None):
        """Fetch and return the list of episodes, optionally filtering by specific episode numbers."""
        response = self.client.get(self.link, follow_redirects=True)
        if response.status_code != 200:
            raise Exception(f"Failed to retrieve episodes. Status code: {response.status_code}")

        soup = BeautifulSoup(response.content.decode('utf-8', 'ignore'), "html.parser")

        raw_eps = {}
        for data in soup.select('li.episode > a'):
            epNum = data.get('data-episode-num')
            epID = data.get('data-episode-id')

            if nums and epNum not in nums:
                continue

            if epID not in raw_eps:
                raw_eps[epID] = {
                    'number': epNum,
                    'link': f"/api/download/{epID}"
                }

        return list(raw_eps.values())

    def get_episode(self, index):
        """Fetch a specific episode based on the index, and return an AnimeWorldPlayer instance."""
        episodes = self.get_episodes()

        if 0 <= index < len(episodes):
            episode_data = episodes[index]

            # AnimeWorldPlayer expects the site base URL as its first argument
            return AnimeWorldPlayer(self.full_url, episode_data, self.session_id, self.csrf_token)

        else:
            raise IndexError("Episode index out of range")
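
A brief usage sketch tying ScrapSerie to the player; both URLs below are placeholders.

# Hypothetical usage: scrape a series page and fetch the first episode's download link
serie = ScrapSerie("https://animeworld.example/play/example-anime.Ab1Cd", "https://animeworld.example")
print(serie.get_name())                # anime title from the <h1 id="anime-title"> tag
player = serie.get_episode(0)          # AnimeWorldPlayer for the first episode
print(player.get_download_link())      # direct mp4 URL, or None on failure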

View File

@@ -207,6 +207,18 @@ def check_duration_v_a(video_path, audio_path, tolerance=1.0):
    video_duration = get_video_duration(video_path)
    audio_duration = get_video_duration(audio_path)

    # Check if either duration is None and specify which one is None
    if video_duration is None and audio_duration is None:
        console.print("[yellow]Warning: Both video and audio durations are None. Returning 0 as duration difference.[/yellow]")
        return False, 0.0

    elif video_duration is None:
        console.print("[yellow]Warning: Video duration is None. Returning 0 as duration difference.[/yellow]")
        return False, 0.0

    elif audio_duration is None:
        console.print("[yellow]Warning: Audio duration is None. Returning 0 as duration difference.[/yellow]")
        return False, 0.0

    # Calculate the duration difference
    duration_difference = abs(video_duration - audio_duration)

    # Check if the duration difference is within the tolerance
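
The remainder of the function sits outside this hunk; a standalone sketch of the assumed tolerance comparison, shown only to illustrate why the early returns above matter (they avoid a TypeError on the subtraction when get_video_duration() returns None).

# Assumed shape of the check that follows this hunk (not the actual code)
def within_tolerance(video_duration, audio_duration, tolerance=1.0):
    if video_duration is None or audio_duration is None:
        return False, 0.0  # mirrors the temporary fix: no crash on a missing duration
    difference = abs(video_duration - audio_duration)
    return difference <= tolerance, difference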

View File

@@ -254,8 +254,18 @@ def main(script_id = 0):
    }

    # Add dynamic arguments based on loaded search modules
    used_short_options = set()
    for alias, (_, use_for) in search_functions.items():
        short_option = alias[:3].upper()
        original_short_option = short_option

        # Make the short option unique by appending a counter when it collides
        count = 1
        while short_option in used_short_options:
            short_option = f"{original_short_option}{count}"
            count += 1

        used_short_options.add(short_option)
        long_option = alias
        parser.add_argument(f'-{short_option}', f'--{long_option}', action='store_true', help=f'Search for {alias.split("_")[0]} on streaming platforms.')
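
A quick standalone check of the short-option de-duplication with hypothetical aliases that collide on their first three letters.

# Hypothetical aliases; both start with "ani", so the second one becomes ANI1
used = set()
for alias in ["animeworld", "animeunity"]:
    short = alias[:3].upper()
    base, count = short, 1
    while short in used:
        short = f"{base}{count}"
        count += 1
    used.add(short)
    print(f"-{short} / --{alias}")  # -> "-ANI / --animeworld", then "-ANI1 / --animeunity"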