This commit is contained in:
Lovi 2025-04-29 23:15:04 +02:00
parent d872921c23
commit 812b3e46ee
6 changed files with 332 additions and 162 deletions

View File

@ -1,140 +0,0 @@
# 05.07.24
import re
import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
# Variable
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
    def __init__(self, url: str):
        """
        Sets up the video source with the provided URL.

        Parameters:
            - url (str): The URL of the video page to resolve.
        """
        self.url = url
        self.redirect_url = None
        self.maxstream_url = None
        self.m3u8_url = None
        self.headers = {'user-agent': get_userAgent()}

    def get_redirect_url(self):
        """
        Sends a request to the initial URL and extracts the redirect URL
        from the `data-src` attribute of the div with id `iframen1`.

        Returns:
            str: The redirect URL.

        Raises:
            Exception: If the request fails or the expected div/attribute is missing.
        """
        try:
            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()

            # Extract the redirect URL from the HTML
            soup = BeautifulSoup(response.text, "html.parser")
            self.redirect_url = soup.find("div", id="iframen1").get("data-src")
            logging.info(f"Redirect URL: {self.redirect_url}")
            return self.redirect_url

        except Exception as e:
            logging.error(f"Error parsing HTML: {e}")
            raise

    def get_maxstream_url(self):
        """
        Sends a request to the redirect URL and extracts the Maxstream URL
        from the first anchor tag. Falls back to the stayonline.pro AJAX API
        when no anchor tag is present.

        Returns:
            str: The Maxstream URL.
        """
        try:
            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()

            # Extract the Maxstream URL from the HTML
            soup = BeautifulSoup(response.text, "html.parser")
            maxstream_url = soup.find("a")

            if maxstream_url is None:

                # If no anchor tag is found, try the alternative method
                logging.warning("Anchor tag not found. Trying the alternative method.")
                headers = {
                    'origin': 'https://stayonline.pro',
                    'user-agent': get_userAgent(),
                    'x-requested-with': 'XMLHttpRequest',
                }

                # Make request to stayonline api (the link id is the second-to-last path segment)
                data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
                # FIX: timeout added for consistency with every other request in this class
                response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data, timeout=MAX_TIMEOUT)
                response.raise_for_status()
                uprot_url = response.json()['data']['value']

                # Retry getting maxstream url
                response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "html.parser")
                maxstream_url = soup.find("a").get("href")

            else:
                maxstream_url = maxstream_url.get("href")

            self.maxstream_url = maxstream_url
            logging.info(f"Maxstream URL: {self.maxstream_url}")
            return self.maxstream_url

        except Exception as e:
            logging.error(f"Error during the request: {e}")
            raise

    def get_m3u8_url(self):
        """
        Sends a request to the Maxstream URL and extracts the .m3u8 file URL
        from the packed p,a,c,k,e,d script after beautifying it.

        Returns:
            str: The .m3u8 URL, or None if no packed script matched.
        """
        try:
            response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Iterate over all script tags in the HTML
            for script in soup.find_all("script"):
                if "eval(function(p,a,c,k,e,d)" in script.text:

                    # Beautify unpacks the obfuscated script so the sources block becomes readable
                    data_js = jsbeautifier.beautify(script.text)

                    # Extract the .m3u8 URL from the script's output
                    match = re.search(r'sources:\s*\[\{\s*src:\s*"([^"]+)"', data_js)

                    if match:
                        self.m3u8_url = match.group(1)
                        logging.info(f"M3U8 URL: {self.m3u8_url}")
                        break

                    else:
                        logging.error("Failed to find M3U8 URL: No match found")

            return self.m3u8_url

        except Exception as e:
            # FIX: previous message blamed a "Node.js script" that is never used here
            logging.error(f"Error extracting the M3U8 URL: {e}")
            raise

    def get_playlist(self):
        """
        Executes the entire flow to obtain the final .m3u8 file URL.

        Returns:
            str: The .m3u8 URL, or None if extraction failed.
        """
        self.get_redirect_url()
        self.get_maxstream_url()
        return self.get_m3u8_url()

View File

@ -0,0 +1,117 @@
# 05.07.24
import re
import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
# Variable
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
    def __init__(self, url: str):
        """
        Sets up the video source with the provided URL.

        Parameters:
            - url (str): The URL of the video page to resolve.
        """
        self.url = url
        self.redirect_url = None  # populated by get_redirect_url()
        self.headers = {
            'accept': 'application/json, text/javascript, */*; q=0.01',
            'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'origin': 'https://stayonline.pro',
            'user-agent': get_userAgent(),
            'x-requested-with': 'XMLHttpRequest',
        }

    def get_redirect_url(self):
        """
        Requests the initial URL and extracts the first stayonline link found
        among the page's anchors.

        Returns:
            str: The stayonline redirect URL.

        Raises:
            Exception: If the request fails or no stayonline link is present.
        """
        try:
            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            for link in soup.find_all('a'):
                if link.get('href') is not None and 'stayonline' in link.get('href'):
                    self.redirect_url = link.get('href')
                    logging.info(f"Redirect URL: {self.redirect_url}")
                    return self.redirect_url

            raise Exception("Stayonline URL not found")

        except Exception as e:
            logging.error(f"Error getting redirect URL: {e}")
            raise

    def get_link_id(self):
        """
        Requests the stayonline redirect page and extracts the `linkId`
        variable from its inline scripts.

        Returns:
            str: The link id.

        Raises:
            Exception: If the request fails or no `linkId` variable is found.
        """
        try:
            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            for script in soup.find_all('script'):
                match = re.search(r'var\s+linkId\s*=\s*"([^"]+)"', script.text)
                if match:
                    return match.group(1)

            raise Exception("LinkId not found")

        except Exception as e:
            logging.error(f"Error getting link ID: {e}")
            raise

    def get_final_url(self, link_id):
        """
        Calls the stayonline linkView API with the given link id and returns
        the embedded host URL.

        Parameters:
            - link_id (str): The id extracted by get_link_id().

        Returns:
            str: The final host URL from the API response.
        """
        try:
            # The API requires a referer pointing back at the link page
            self.headers['referer'] = f'https://stayonline.pro/l/{link_id}/'
            data = {
                'id': link_id,
                'ref': '',
            }

            response = httpx.post('https://stayonline.pro/ajax/linkView.php',
                                  headers=self.headers,
                                  data=data,
                                  timeout=MAX_TIMEOUT)
            response.raise_for_status()
            return response.json()['data']['value']

        except Exception as e:
            logging.error(f"Error getting final URL: {e}")
            raise

    def get_playlist(self):
        """
        Executes the entire flow to obtain the final video URL.

        Returns:
            str: The delivery URL ("https:" + MDCore.wurl), or None if the
            beautified script contains no MDCore.wurl assignment.

        Raises:
            Exception: If no packed ("eval") script is found on the host page.
        """
        self.get_redirect_url()
        link_id = self.get_link_id()
        final_url = self.get_final_url(link_id)
        # FIX: removed leftover debug code that overwrote final_url with a
        # hardcoded mixdrop link, and replaced debug print()s with logging.
        logging.info(f"Final URL: {final_url}")

        response = httpx.get(final_url, timeout=MAX_TIMEOUT)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        script_text = None
        for script in soup.find_all('script'):
            if "eval" in str(script.text):
                script_text = str(script.text)
                break

        # FIX: guard against a missing script instead of passing None to jsbeautifier
        if script_text is None:
            raise Exception("Packed video script not found")

        delivery_url = None
        beautified = jsbeautifier.beautify(script_text)
        for line in beautified.splitlines():
            if 'MDCore.wurl' in line:
                # Line looks like: MDCore.wurl = "//host/path";
                # FIX: strip '"' and ';' together — the old order left a trailing quote
                url = line.split('= ', 1)[1].strip().strip('";')
                delivery_url = f"https:{url}"  # wurl is protocol-relative
                break

        logging.info(f"Delivery URL: {delivery_url}")
        return delivery_url

View File

@ -0,0 +1,70 @@
# 09.06.24
from urllib.parse import quote_plus
# External library
from rich.console import Console
from rich.prompt import Prompt
# Internal utilities
from StreamingCommunity.Api.Template import get_select_title
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Logic class
from .site import title_search, media_search_manager, table_show_manager
from .film import download_film
# Variable
indice = 4                # NOTE(review): presumably this site's index in the provider registry — confirm against the loader
_useFor = "film"          # only films are handled (process_search_result calls download_film exclusively)
_priority = 0             # NOTE(review): presumably ordering/priority among providers — confirm
_engineDownload = "mp4"   # this provider downloads via the MP4 engine
msg = Prompt()
console = Console()
def process_search_result(select_title):
    """
    Handles the search result and initiates the film download.

    Note: only films are handled here — download_film is the sole
    downloader this provider invokes.
    """
    download_film(select_title)
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None):
    """
    Main function of the application for search.

    Parameters:
        string_to_search (str, optional): String to search for
        get_onlyDatabase (bool, optional): If True, return only the database object
        direct_item (dict, optional): Direct item to process (bypass search)
    """
    # A direct item bypasses the interactive search entirely
    if direct_item:
        select_title = MediaItem(**direct_item)
        process_search_result(select_title)
        return

    # No query supplied: prompt the user interactively
    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{site_constant.SITE_NAME}").strip()

    # Search on database (query is URL-encoded before hitting the site)
    len_database = title_search(quote_plus(string_to_search))

    # If only the database is needed, return the manager
    if get_onlyDatabase:
        return media_search_manager

    if len_database > 0:
        select_title = get_select_title(table_show_manager, media_search_manager)
        process_search_result(select_title)

    else:
        # If no results are found, ask again (restarts with an interactive prompt)
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
        search()

View File

@ -0,0 +1,64 @@
# 03.07.24
import os
# External library
from rich.console import Console
# Internal utilities
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Lib.Downloader import MP4_downloader
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Player
from StreamingCommunity.Api.Player.mixdrop import VideoSource
# Variable
console = Console()
def download_film(select_title: MediaItem) -> tuple:
    """
    Downloads a film using the provided obj.

    Parameters:
        - select_title (MediaItem): The media item to be downloaded. This should be an
          instance of the MediaItem class, containing attributes like `name` and `url`.

    Return:
        - tuple: (output path, kill handler) as returned by MP4_downloader.
          FIX: the old `-> str` annotation and docstring were wrong — this
          function has always returned the (path, kill_handler) pair.
    """
    start_message()
    console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")

    # Setup api manger and resolve the direct MP4 URL
    # FIX: removed leftover debug print(src_mp4)
    video_source = VideoSource(select_title.url)
    src_mp4 = video_source.get_playlist()

    # Define output path
    # NOTE(review): ".mp4" is appended then immediately stripped — presumably
    # MP4_downloader adds its own extension; confirm before simplifying
    title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))

    # Start downloading (mixdrop delivery hosts require Origin/Referer/Range headers)
    path, kill_handler = MP4_downloader(
        url=src_mp4,
        path=mp4_path,
        headers_= {
            'Connection': 'keep-alive',
            'Origin': 'https://mixdrop.sb',
            'Range': 'bytes=0-',
            'Referer': 'https://mixdrop.sb/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 OPR/118.0.0.0',
        }
    )

    return path, kill_handler

View File

@ -0,0 +1,81 @@
# 03.07.24
import sys
# External libraries
import httpx
from bs4 import BeautifulSoup
from rich.console import Console
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
# Variable
console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
def title_search(query: str) -> int:
    """
    Search for titles based on a search query.

    Parameters:
        - query (str): The query to search for (already URL-encoded by the caller).

    Returns:
        - int: The number of titles found (0 when the request itself fails).
    """
    media_search_manager.clear()
    table_show_manager.clear()

    search_url = f"{site_constant.FULL_URL}/?s={query}"
    console.print(f"[cyan]Search url: [yellow]{search_url}")

    try:
        response = httpx.get(
            search_url,
            headers={'user-agent': get_userAgent()},
            timeout=max_timeout,
            follow_redirects=True,
            # NOTE(review): TLS verification disabled — confirm this is a
            # deliberate workaround for this site's certificate
            verify=False
        )
        response.raise_for_status()

    except Exception as e:
        console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
        return 0

    # Create soup and iterate over the result cards
    soup = BeautifulSoup(response.text, "html.parser")

    for card in soup.find_all("div", class_=["card", "mp-post", "horizontal"]):
        try:
            title_tag = card.find("h3", class_="card-title").find("a")
            url = title_tag.get("href")
            title = title_tag.get_text(strip=True)

            title_info = {
                'name': title,
                'url': url,
                'type': 'film'
            }
            media_search_manager.add_media(title_info)

        except Exception as e:
            # FIX: use console.print for consistency with the rest of this module
            # (a malformed card is skipped; the remaining results are still collected)
            console.print(f"Error parsing a film entry: {e}")

    # Return the number of titles found
    return media_search_manager.get_length()

View File

@ -1,22 +0,0 @@
# 23.11.24

# Fix import: make the repository root importable when this file is run directly
import sys
import os
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)

# Import
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.logger import Logger
from StreamingCommunity.Api.Player.maxstream import VideoSource

# Test: resolve and print the master playlist URL for a sample title
start_message()
logger = Logger()  # NOTE(review): presumably instantiating Logger configures logging — confirm
video_source = VideoSource("https://cb01new.biz/what-the-waters-left-behind-scars-hd-2023")
master_playlist = video_source.get_playlist()
print(master_playlist)