Remove __pycache__

This commit is contained in:
Lovi 2024-12-10 12:07:01 +01:00
parent 98b2f03692
commit cff39796fc
101 changed files with 49 additions and 825 deletions

47
.gitignore vendored
View File

@ -1,8 +1,45 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
*.manifest
*.spec
# Other
Video
note.txt
list_proxy.txt
cmd.txt
downloaded_files
# Cache
__pycache__/
# Ignore node_modules directory in the client dashboard to avoid committing dependencies
/client/dashboard/node_modules
# Ignore build directory in the client dashboard to avoid committing build artifacts
/client/dashboard/build
# Ignore all __pycache__ directories
**/__pycache__/
# Ignore Video directory to avoid committing video files
/Video/

View File

@ -1,89 +0,0 @@
# 14.06.24
import logging
# External libraries
import httpx
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager
from StreamingCommunity.Util.headers import get_headers
# Variable
from StreamingCommunity.Api.Site.ddlstreamitaly.costant import COOKIE
max_timeout = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
    """Scrapes a direct MP4 link out of a ddlstreamitaly video page."""

    def __init__(self) -> None:
        """Initialize with the default user-agent header and the site cookie."""
        self.headers = {'user-agent': get_headers()}
        self.cookie = COOKIE

    def setup(self, url: str) -> None:
        """
        Bind this source to a page URL.

        Parameters:
            - url (str): The URL of the video source.
        """
        self.url = url

    def make_request(self, url: str) -> str:
        """
        Perform an HTTP GET against the given URL with the stored headers and cookie.

        Parameters:
            - url (str): The URL to make the request to.

        Returns:
            - str: The response body on success, None on any failure.
        """
        try:
            resp = httpx.get(
                url=url,
                headers=self.headers,
                cookies=self.cookie,
                timeout=max_timeout
            )
            resp.raise_for_status()
            return resp.text

        except Exception as err:
            logging.error(f"An error occurred: {err}")
            return None

    def get_playlist(self):
        """
        Locate the first <source> tag on the page and return its src attribute.

        Returns:
            - tuple: The mp4 link if found, None otherwise.
        """
        try:
            page_html = self.make_request(self.url)
            if not page_html:
                logging.error("Failed to retrieve content from the URL.")
                return None

            source_tag = BeautifulSoup(page_html, "html.parser").find("source")
            if source_tag is None:
                logging.error("No <source> tag found in the HTML.")
                return None

            # .get returns None when the attribute is absent, matching the
            # documented "None otherwise" contract.
            return source_tag.get("src")

        except Exception as e:
            logging.error(f"An error occurred while parsing the playlist: {e}")

View File

@ -1,151 +0,0 @@
# 05.07.24
import re
import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager
from StreamingCommunity.Util.headers import get_headers
# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
    """Resolves a .m3u8 playlist URL via a redirect page and a Maxstream embed."""

    def __init__(self, url: str):
        """
        Sets up the video source with the provided URL.

        Parameters:
            - url (str): The URL of the video.
        """
        self.url = url
        # Intermediate URLs filled in by the get_* methods, in order.
        self.redirect_url = None
        self.maxstream_url = None
        self.m3u8_url = None
        self.headers = {'user-agent': get_headers()}

    def get_redirect_url(self):
        """
        Sends a request to the initial URL and extracts the redirect URL.

        Returns:
            The URL found in the data-src attribute of the #iframen1 div.

        Raises:
            httpx.RequestError: If the HTTP request fails.
            AttributeError: If the #iframen1 div is missing (find() returns None).
        """
        try:
            # Send a GET request to the initial URL
            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
            response.raise_for_status()

            # Extract the redirect URL from the HTML
            soup = BeautifulSoup(response.text, "html.parser")
            self.redirect_url = soup.find("div", id="iframen1").get("data-src")
            logging.info(f"Redirect URL: {self.redirect_url}")
            return self.redirect_url

        except httpx.RequestError as e:
            logging.error(f"Error during the initial request: {e}")
            raise

        except AttributeError as e:
            # Raised when soup.find(...) returned None above.
            logging.error(f"Error parsing HTML: {e}")
            raise

    def get_maxstream_url(self):
        """
        Sends a request to the redirect URL and extracts the Maxstream URL.

        Primary path: take the href of the first anchor tag on the page.
        Fallback path: query the stayonline.pro embed API, then retry on the
        returned URL.

        Returns:
            The Maxstream URL (also stored on self.maxstream_url).

        Raises:
            httpx.RequestError: If any HTTP request fails.
            AttributeError: If no anchor tag is found on the retried page.
        """
        try:
            # Send a GET request to the redirect URL
            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
            response.raise_for_status()

            # Extract the Maxstream URL from the HTML
            soup = BeautifulSoup(response.text, "html.parser")
            maxstream_url = soup.find("a")

            if maxstream_url is None:
                # If no anchor tag is found, try the alternative method
                logging.warning("Anchor tag not found. Trying the alternative method.")
                # Fixed browser headers required by the stayonline API
                # (XMLHttpRequest marker plus matching origin).
                headers = {
                    'origin': 'https://stayonline.pro',
                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 OPR/111.0.0.0',
                    'x-requested-with': 'XMLHttpRequest',
                }

                # Make request to stayonline api; the embed id is the
                # second-to-last path segment of the redirect URL.
                data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
                response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data)
                response.raise_for_status()
                uprot_url = response.json()['data']['value']

                # Retry extracting the Maxstream URL from the resolved page
                response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "html.parser")
                maxstream_url = soup.find("a").get("href")

            else:
                maxstream_url = maxstream_url.get("href")

            self.maxstream_url = maxstream_url
            logging.info(f"Maxstream URL: {self.maxstream_url}")
            return self.maxstream_url

        except httpx.RequestError as e:
            logging.error(f"Error during the request to the redirect URL: {e}")
            raise

        except AttributeError as e:
            logging.error(f"Error parsing HTML: {e}")
            raise

    def get_m3u8_url(self):
        """
        Sends a request to the Maxstream URL and extracts the .m3u8 file URL.

        Returns:
            The first matching .m3u8 URL, or None if no packed script matched.
        """
        try:
            # Send a GET request to the Maxstream URL
            response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Iterate over all script tags in the HTML, looking for the
            # p.a.c.k.e.d obfuscated player script.
            for script in soup.find_all("script"):
                if "eval(function(p,a,c,k,e,d)" in script.text:

                    # Unpack the obfuscated script so the sources block is readable
                    data_js = jsbeautifier.beautify(script.text)

                    # Extract the .m3u8 URL from the script's output
                    match = re.search(r'sources:\s*\[\{\s*src:\s*"([^"]+)"', data_js)

                    if match:
                        self.m3u8_url = match.group(1)
                        logging.info(f"M3U8 URL: {self.m3u8_url}")
                        break

            return self.m3u8_url

        except Exception as e:
            logging.error(f"Error executing the Node.js script: {e}")
            raise

    def get_playlist(self):
        """
        Executes the entire flow to obtain the final .m3u8 file URL.

        Returns:
            The .m3u8 URL, or None if the final extraction found no match.
        """
        self.get_redirect_url()
        self.get_maxstream_url()
        return self.get_m3u8_url()

View File

@ -1,194 +0,0 @@
# 26.05.24
import re
import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager
from StreamingCommunity.Util.headers import get_headers
# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
def __init__(self, url: str) -> None:
"""
Initializes the VideoSource object with default values.
Attributes:
- url (str): The URL of the video source.
"""
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'User-Agent': get_headers()
}
self.client = httpx.Client()
self.url = url
def make_request(self, url: str) -> str:
"""
Make an HTTP GET request to the provided URL.
Parameters:
- url (str): The URL to make the request to.
Returns:
- str: The response content if successful, None otherwise.
"""
try:
response = self.client.get(
url=url,
headers=self.headers,
follow_redirects=True,
timeout=max_timeout
)
response.raise_for_status()
return response.text
except Exception as e:
logging.error(f"Request failed: {e}")
return None
def parse_html(self, html_content: str) -> BeautifulSoup:
"""
Parse the provided HTML content using BeautifulSoup.
Parameters:
- html_content (str): The HTML content to parse.
Returns:
- BeautifulSoup: Parsed HTML content if successful, None otherwise.
"""
try:
soup = BeautifulSoup(html_content, "html.parser")
return soup
except Exception as e:
logging.error(f"Failed to parse HTML content: {e}")
return None
def get_iframe(self, soup):
"""
Extracts the source URL of the second iframe in the provided BeautifulSoup object.
Parameters:
- soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML.
Returns:
- str: The source URL of the second iframe, or None if not found.
"""
iframes = soup.find_all("iframe")
if iframes and len(iframes) > 1:
return iframes[1].get("src")
return None
def find_content(self, url):
"""
Makes a request to the specified URL and parses the HTML content.
Parameters:
- url (str): The URL to fetch content from.
Returns:
- BeautifulSoup: A BeautifulSoup object representing the parsed HTML content, or None if the request fails.
"""
content = self.make_request(url)
if content:
return self.parse_html(content)
return None
def get_result_node_js(self, soup):
"""
Prepares and runs a Node.js script from the provided BeautifulSoup object to retrieve the video URL.
Parameters:
- soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML content.
Returns:
- str: The output from the Node.js script, or None if the script cannot be found or executed.
"""
for script in soup.find_all("script"):
if "eval" in str(script):
return jsbeautifier.beautify(script.text)
return None
def get_playlist(self) -> str:
"""
Download a video from the provided URL.
Returns:
str: The URL of the downloaded video if successful, None otherwise.
"""
try:
html_content = self.make_request(self.url)
if not html_content:
logging.error("Failed to fetch HTML content.")
return None
soup = self.parse_html(html_content)
if not soup:
logging.error("Failed to parse HTML content.")
return None
# Find master playlist
data_js = self.get_result_node_js(soup)
if data_js is not None:
match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', data_js)
if match:
return match.group(1)
else:
iframe_src = self.get_iframe(soup)
if not iframe_src:
logging.error("No iframe found.")
return None
down_page_soup = self.find_content(iframe_src)
if not down_page_soup:
logging.error("Failed to fetch down page content.")
return None
pattern = r'data-link="(//supervideo[^"]+)"'
match = re.search(pattern, str(down_page_soup))
if not match:
logging.error("No player available for download.")
return None
supervideo_url = "https:" + match.group(1)
supervideo_soup = self.find_content(supervideo_url)
if not supervideo_soup:
logging.error("Failed to fetch supervideo content.")
return None
# Find master playlist
data_js = self.get_result_node_js(supervideo_soup)
match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', data_js)
if match:
return match.group(1)
return None
except Exception as e:
logging.error(f"An error occurred: {e}")
return None

View File

@ -221,10 +221,11 @@ class M3U8_Segments:
self.download_interrupted = True
self.stop_event.set()
if threading.current_thread() is threading.main_thread():
"""if threading.current_thread() is threading.main_thread():
signal.signal(signal.SIGINT, interrupt_handler)
else:
console.log("[red]Signal handler must be set in the main thread !!")
console.log("[red]Signal handler must be set in the main thread !!")"""
signal.signal(signal.SIGINT, interrupt_handler)
def make_requests_stream(self, ts_url: str, index: int, progress_bar: tqdm, backoff_factor: float = 1.5) -> None:
"""

View File

@ -1,156 +0,0 @@
# 09.06.24
import os
import sys
import ssl
import certifi
import logging
# External libraries
import httpx
from tqdm import tqdm
# Internal utilities
from StreamingCommunity.Util.headers import get_headers
from StreamingCommunity.Util.color import Colors
from StreamingCommunity.Util.console import console, Panel
from StreamingCommunity.Util._jsonConfig import config_manager
from StreamingCommunity.Util.os import internet_manager
# Logic class
from ...FFmpeg import print_duration_table
# Suppress SSL warnings
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Config
GET_ONLY_LINK = config_manager.get_bool('M3U8_PARSER', 'get_only_link')
TQDM_USE_LARGE_BAR = config_manager.get_int('M3U8_DOWNLOAD', 'tqdm_use_large_bar')
REQUEST_VERIFY = config_manager.get_float('REQUESTS', 'verify_ssl')
REQUEST_TIMEOUT = config_manager.get_float('REQUESTS', 'timeout')
def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = None):
    """
    Downloads an MP4 video from a given URL with robust error handling and SSL bypass.

    Parameters:
        - url (str): The URL of the MP4 video to download.
        - path (str): The local path where the downloaded MP4 file will be saved.
        - referer (str, optional): The referer header value.
        - headers_ (dict, optional): Custom headers for the request.

    Returns:
        The saved path on success, a {'path', 'url'} dict in link-only mode,
        None on any failure.
    """
    # Early return for link-only mode
    if GET_ONLY_LINK:
        return {'path': path, 'url': url}

    # Validate URL
    if not (url.lower().startswith('http://') or url.lower().startswith('https://')):
        logging.error(f"Invalid URL: {url}")
        console.print(f"[bold red]Invalid URL: {url}[/bold red]")
        return None

    # Prepare headers
    try:
        headers = {}
        if referer:
            headers['Referer'] = referer

        # Use custom headers if provided, otherwise use default user agent
        if headers_:
            headers.update(headers_)
        else:
            headers['User-Agent'] = get_headers()

    except Exception as header_err:
        logging.error(f"Error preparing headers: {header_err}")
        console.print(f"[bold red]Error preparing headers: {header_err}[/bold red]")
        return None

    try:
        # Create a custom transport that bypasses SSL verification
        # NOTE(review): verify=False disables certificate checks on purpose
        # (many of these hosts present broken certs) — confirm this is intended.
        transport = httpx.HTTPTransport(
            verify=False,   # Disable SSL certificate verification
            http2=True      # Optional: enable HTTP/2 support
        )

        # Download with streaming and progress tracking
        with httpx.Client(transport=transport, timeout=httpx.Timeout(60.0)) as client:
            with client.stream("GET", url, headers=headers, timeout=REQUEST_TIMEOUT) as response:
                response.raise_for_status()

                # Get total file size
                total = int(response.headers.get('content-length', 0))

                # Handle empty streams
                if total == 0:
                    console.print("[bold red]No video stream found.[/bold red]")
                    return None

                # Create progress bar
                progress_bar = tqdm(
                    total=total,
                    ascii='░▒█',
                    bar_format=f"{Colors.YELLOW}[MP4] {Colors.WHITE}({Colors.CYAN}video{Colors.WHITE}): "
                               f"{Colors.RED}{{percentage:.2f}}% {Colors.MAGENTA}{{bar}} {Colors.WHITE}[ "
                               f"{Colors.YELLOW}{{n_fmt}}{Colors.WHITE} / {Colors.RED}{{total_fmt}} {Colors.WHITE}] "
                               f"{Colors.YELLOW}{{elapsed}} {Colors.WHITE}< {Colors.CYAN}{{remaining}} {Colors.WHITE}| "
                               f"{Colors.YELLOW}{{rate_fmt}}{{postfix}} {Colors.WHITE}]",
                    unit='iB',
                    unit_scale=True,
                    desc='Downloading',
                    mininterval=0.05
                )

                # Ensure directory exists.
                # FIX: when `path` is a bare filename, dirname is '' and
                # os.makedirs('') raises FileNotFoundError — only create
                # the parent when there actually is one.
                parent_dir = os.path.dirname(path)
                if parent_dir:
                    os.makedirs(parent_dir, exist_ok=True)

                # Download file
                with open(path, 'wb') as file, progress_bar as bar:
                    downloaded = 0
                    for chunk in response.iter_bytes(chunk_size=1024):
                        if chunk:
                            size = file.write(chunk)
                            downloaded += size
                            bar.update(size)

        # Post-download processing
        if os.path.exists(path) and os.path.getsize(path) > 0:
            console.print(Panel(
                f"[bold green]Download completed![/bold green]\n"
                f"[cyan]File size: [bold red]{internet_manager.format_file_size(os.path.getsize(path))}[/bold red]\n"
                f"[cyan]Duration: [bold]{print_duration_table(path, description=False, return_string=True)}[/bold]",
                title=f"{os.path.basename(path.replace('.mp4', ''))}",
                border_style="green"
            ))
            return path

        else:
            console.print("[bold red]Download failed or file is empty.[/bold red]")
            return None

    except httpx.HTTPStatusError as http_err:
        logging.error(f"HTTP error occurred: {http_err}")
        console.print(f"[bold red]HTTP Error: {http_err}[/bold red]")
        return None

    except httpx.RequestError as req_err:
        logging.error(f"Request error: {req_err}")
        console.print(f"[bold red]Request Error: {req_err}[/bold red]")
        return None

    except Exception as e:
        logging.error(f"Unexpected error during download: {e}")
        console.print(f"[bold red]Unexpected Error: {e}[/bold red]")
        return None

View File

@ -1,222 +0,0 @@
# 23.06.24
import os
import sys
import time
import shutil
import logging
# Internal utilities
from StreamingCommunity.Util.color import Colors
from StreamingCommunity.Util.os import internet_manager
from StreamingCommunity.Util._jsonConfig import config_manager
# External libraries
from tqdm import tqdm
from qbittorrent import Client
# Tor config
HOST = str(config_manager.get_dict('DEFAULT', 'config_qbit_tor')['host'])
PORT = str(config_manager.get_dict('DEFAULT', 'config_qbit_tor')['port'])
USERNAME = str(config_manager.get_dict('DEFAULT', 'config_qbit_tor')['user'])
PASSWORD = str(config_manager.get_dict('DEFAULT', 'config_qbit_tor')['pass'])
# Config
TQDM_USE_LARGE_BAR = config_manager.get_int('M3U8_DOWNLOAD', 'tqdm_use_large_bar')
REQUEST_VERIFY = config_manager.get_float('REQUESTS', 'verify_ssl')
REQUEST_TIMEOUT = config_manager.get_float('REQUESTS', 'timeout')
class TOR_downloader:
    """Drives a local qBittorrent instance: add a magnet, watch progress, move the resulting video files."""

    def __init__(self):
        """
        Initializes the TorrentManager instance and logs in to qBittorrent.

        Connection parameters (host, port, username, password) are read from
        the module-level HOST/PORT/USERNAME/PASSWORD config values.
        """
        try:
            self.qb = Client(f'http://{HOST}:{PORT}/')
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            logging.error("Start qbitorrent first.")

        self.username = USERNAME
        self.password = PASSWORD
        self.logged_in = False
        self.save_path = None       # filled in by start_download()
        self.torrent_name = None    # filled in by start_download()

        self.login()

    def login(self):
        """
        Logs into the qBittorrent Web UI, setting self.logged_in accordingly.
        """
        try:
            self.qb.login(self.username, self.password)
            self.logged_in = True
            logging.info("Successfully logged in to qBittorrent.")

        except Exception as e:
            logging.error(f"Failed to log in: {str(e)}")
            self.logged_in = False

    def add_magnet_link(self, magnet_link):
        """
        Adds a torrent via magnet link to qBittorrent.

        Parameters:
            - magnet_link (str): Magnet link of the torrent to be added.
        """
        try:
            self.qb.download_from_link(magnet_link)
            logging.info("Added magnet link to qBittorrent.")

            # Remember the hash of the latest added torrent
            torrents = self.qb.torrents()
            if torrents:
                self.latest_torrent_hash = torrents[-1]['hash']
                logging.info(f"Latest torrent hash: {self.latest_torrent_hash}")

        except Exception as e:
            logging.error(f"Failed to add magnet link: {str(e)}")

    def start_download(self):
        """
        Starts downloading the latest added torrent and monitors progress
        until it reaches 100%, updating a tqdm progress bar.
        """
        try:
            torrents = self.qb.torrents()
            if not torrents:
                logging.error("No torrents found.")
                return

            # Give qBittorrent time to register the magnet before polling
            time.sleep(10)
            latest_torrent = torrents[-1]
            torrent_hash = latest_torrent['hash']

            # Custom bar for mobile and pc
            if TQDM_USE_LARGE_BAR:
                bar_format = (
                    f"{Colors.YELLOW}[TOR] {Colors.WHITE}({Colors.CYAN}video{Colors.WHITE}): "
                    f"{Colors.RED}{{percentage:.2f}}% {Colors.MAGENTA}{{bar}} {Colors.WHITE}[ "
                    f"{Colors.YELLOW}{{elapsed}} {Colors.WHITE}< {Colors.CYAN}{{remaining}}{{postfix}} {Colors.WHITE}]"
                )
            else:
                bar_format = (
                    f"{Colors.YELLOW}Proc{Colors.WHITE}: "
                    f"{Colors.RED}{{percentage:.2f}}% {Colors.WHITE}| "
                    f"{Colors.CYAN}{{remaining}}{{postfix}} {Colors.WHITE}]"
                )

            progress_bar = tqdm(
                total=100,
                ascii='░▒█',
                bar_format=bar_format,
                unit_scale=True,
                unit_divisor=1024,
                mininterval=0.05
            )

            with progress_bar as pbar:
                while True:

                    # Poll torrent state from qBittorrent
                    torrent_info = self.qb.get_torrent(torrent_hash)
                    self.save_path = torrent_info['save_path']
                    self.torrent_name = torrent_info['name']

                    # Progress is derived from piece counts (guard against
                    # pieces_num == 0 before metadata is available)
                    pieces_have = torrent_info['pieces_have']
                    pieces_num = torrent_info['pieces_num']
                    progress = (pieces_have / pieces_num) * 100 if pieces_num else 0
                    pbar.n = progress

                    download_speed = torrent_info['dl_speed']
                    total_size = torrent_info['total_size']
                    downloaded_size = torrent_info['total_downloaded']

                    # Format sizes/speed for the postfix display
                    downloaded_size_str = internet_manager.format_file_size(downloaded_size)
                    downloaded_size = downloaded_size_str.split(' ')[0]

                    total_size_str = internet_manager.format_file_size(total_size)
                    total_size = total_size_str.split(' ')[0]
                    total_size_unit = total_size_str.split(' ')[1]

                    average_internet_str = internet_manager.format_transfer_speed(download_speed)
                    average_internet = average_internet_str.split(' ')[0]
                    average_internet_unit = average_internet_str.split(' ')[1]

                    # Update the progress bar's postfix
                    if TQDM_USE_LARGE_BAR:
                        pbar.set_postfix_str(
                            f"{Colors.WHITE}[ {Colors.GREEN}{downloaded_size} {Colors.WHITE}< {Colors.GREEN}{total_size} {Colors.RED}{total_size_unit} "
                            f"{Colors.WHITE}| {Colors.CYAN}{average_internet} {Colors.RED}{average_internet_unit}"
                        )
                    else:
                        pbar.set_postfix_str(
                            f"{Colors.WHITE}[ {Colors.GREEN}{downloaded_size}{Colors.RED} {total_size} "
                            f"{Colors.WHITE}| {Colors.CYAN}{average_internet} {Colors.RED}{average_internet_unit}"
                        )

                    pbar.refresh()
                    time.sleep(0.2)

                    # Break at the end
                    if int(progress) == 100:
                        break

        except KeyboardInterrupt:
            logging.info("Download process interrupted.")

        except Exception as e:
            logging.error(f"Download error: {str(e)}")
            sys.exit(0)

    def move_downloaded_files(self, destination=None):
        """
        Moves downloaded video files of the latest torrent to another location,
        then removes the torrent from qBittorrent.

        Parameters:
            - destination (str, optional): Destination directory to move files.
              If None, moves to the current working directory.

        Returns:
            - bool: True when the move pass completes.
        """
        # FIX: 'avi' had no leading dot; os.path.splitext returns '.avi',
        # so .avi files were silently skipped by the extension check.
        video_extensions = {'.mp4', '.mkv', '.avi'}
        time.sleep(2)

        # List directories in the save path
        dirs = [d for d in os.listdir(self.save_path) if os.path.isdir(os.path.join(self.save_path, d))]

        for dir_name in dirs:
            # Match the torrent's directory by the first word of its name
            if self.torrent_name.split(" ")[0] in dir_name:
                dir_path = os.path.join(self.save_path, dir_name)

                # Ensure destination is set; if not, use current directory
                destination = destination or os.getcwd()

                # Move only video files
                for file_name in os.listdir(dir_path):
                    file_path = os.path.join(dir_path, file_name)

                    # Check if it's a file and if it has a video extension
                    if os.path.isfile(file_path) and os.path.splitext(file_name)[1] in video_extensions:
                        shutil.move(file_path, os.path.join(destination, file_name))
                        logging.info(f"Moved file {file_name} to {destination}")

        time.sleep(2)
        # Remove the (latest) torrent and its data from qBittorrent
        self.qb.delete_permanently(self.qb.torrents()[-1]['hash'])
        return True

View File

@ -1,5 +1,3 @@
# 23.06.24
from .HLS.downloader import HLS_Downloader
from .MP4.downloader import MP4_downloader
from .TOR.downloader import TOR_downloader
from .HLS.downloader import HLS_Downloader

View File

@ -62,11 +62,11 @@
},
"SITE": {
"streamingcommunity": {
"domain": "asia"
"domain": "family"
}
},
"EXTRA": {
"mongodb": "mongodb://....",
"mongodb": "mongodb+srv://admin:admin@cluster0.hwk1q.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0",
"database": "StreamingCommunity"
}
}

Some files were not shown because too many files have changed in this diff Show More