Add timeout to all sites, add type category, and add recall-home for HLS.

Lovi 2024-10-27 17:20:55 +01:00
parent f09add2074
commit 44e81d7f5e
32 changed files with 317 additions and 103 deletions

@@ -153,7 +153,7 @@ You can change some behaviors by tweaking the configuration file.
 <summary><strong>REQUESTS</strong></summary>
 * **timeout**: The timeout value for requests.
-  - **Default Value**: `10`
+  - **Default Value**: `15`
 * **verify_ssl**: Whether to verify SSL certificates.
   - **Default Value**: `false`
@@ -165,22 +165,10 @@ You can change some behaviors by tweaking the configuration file.
 <details>
 <summary><strong>M3U8_DOWNLOAD</strong></summary>
-* **tdqm_workers**: The number of workers that will cooperate to download .ts files. **A high value may slow down your PC**
-  - **Default Value**: `30`
 * **tqdm_use_large_bar**: Whether to use large progress bars during downloads (Downloading %desc: %percentage:.2f %bar %elapsed < %remaining %postfix
   - **Default Value**: `true`
   - **Example Value**: `false` with Proc: %percentage:.2f %remaining %postfix
-* **download_video**: Whether to download video streams.
-  - **Default Value**: `true`
-* **download_audio**: Whether to download audio streams.
-  - **Default Value**: `true`
-* **download_sub**: Whether to download subtitle streams.
-  - **Default Value**: `true`
 * **specific_list_audio**: A list of specific audio languages to download.
   - **Example Value**: `['ita']`
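These two README hunks document the commit's headline change: the default request timeout rises from 10 to 15 seconds, and the removed M3U8_DOWNLOAD options disappear from the docs. The code hunks below repeat one pattern in every site module; a minimal sketch of it, using the `config_manager` API that appears throughout this diff:

```python
from Src.Util._jsonConfig import config_manager

# Read the shared timeout once at module level; 15 s is the new default.
max_timeout = config_manager.get_int("REQUESTS", "timeout")

# Every httpx call then passes it explicitly, e.g.:
# response = httpx.get(url, headers=headers, timeout=max_timeout)
```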

@@ -11,6 +11,7 @@ from .title import download_title
 # Variable
 indice = 8
+_use_for = "film_serie"
 _deprecate = False

@@ -7,6 +7,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -33,12 +35,22 @@ def title_search(word_to_search: str) -> int:
     """
     # Find new domain if prev dont work
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

     # Construct the full site URL and load the search page
-    response = httpx.get(f"https://{SITE_NAME}.{domain_to_use}/search/{unidecode(word_to_search)}/1/", headers={'user-agent': get_headers()}, follow_redirects=True)
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://{SITE_NAME}.{domain_to_use}/search/{unidecode(word_to_search)}/1/",
+            headers={'user-agent': get_headers()},
+            follow_redirects=True,
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")

@@ -70,13 +70,14 @@ def search_domain(site_name: str, base_url: str):
     """
     # Extract config domain
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain = str(config_manager.get_dict("SITE", site_name)['domain'])
     console.print(f"[cyan]Test site[white]: [red]{base_url}.{domain}")

     try:
         # Test the current domain
-        response_follow = httpx.get(f"{base_url}.{domain}", headers={'user-agent': get_headers()}, timeout=4, follow_redirects=True)
+        response_follow = httpx.get(f"{base_url}.{domain}", headers={'user-agent': get_headers()}, timeout=max_timeout, follow_redirects=True)
         console.print(f"[cyan]Response site[white]: [red]{response_follow.status_code}")
         response_follow.raise_for_status()

@@ -11,9 +11,14 @@ from bs4 import BeautifulSoup
 # Internal utilities
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers

+# Variable
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
+

 class VideoSource:
     def __init__(self, url: str):
         """
@@ -46,7 +51,7 @@ class VideoSource:
         """
         try:
-            response = httpx.get(url, headers=self.headers, follow_redirects=True)
+            response = httpx.get(url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
             response.raise_for_status()

             return response.text

@@ -11,6 +11,7 @@ from .film import download_film
 # Variable
 indice = 2
+_use_for = "film"
 _deprecate = False

@@ -1,14 +1,15 @@
 # 26.05.24

 import os
-import sys
-import logging
+import time

 # Internal utilities
+from Src.Util.console import console, msg
 from Src.Util.message import start_message
-from Src.Util.console import console
+from Src.Util.call_stack import get_call_stack
 from Src.Lib.Downloader import HLS_Downloader
+from ..Template import execute_search

 # Logic class
@@ -44,7 +45,10 @@ def download_film(select_title: MediaItem):
     master_playlist = video_source.get_playlist()

     # Download the film using the m3u8 playlist, and output filename
-    HLS_Downloader(
-        m3u8_playlist = master_playlist,
-        output_filename = os.path.join(mp4_path, mp4_name)
-    ).start()
+    if HLS_Downloader(m3u8_playlist = master_playlist, output_filename = os.path.join(mp4_path, mp4_name)).start() == 404:
+        time.sleep(2)
+
+        # Re call search function
+        if msg.ask("[green]Do you want to continue [white]([red]y[white])[green] or return at home[white]([red]n[white]) ", choices=['y', 'n'], default='y', show_choices=True) == "n":
+            frames = get_call_stack()
+            execute_search(frames[-4])
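The recall-home flow keys off `HLS_Downloader(...).start()` returning `404`, then offers to re-run the search that led here. `get_call_stack` is imported from `Src.Util.call_stack` but not shown in this diff; a hypothetical sketch of what it plausibly returns, only so the `frames[-4]` indexing reads sensibly:

```python
import inspect

def get_call_stack():
    # Hypothetical reconstruction (the real one lives in Src.Util.call_stack):
    # one record per interpreter stack frame, innermost first, so frames[-4]
    # reaches a caller several levels up, the site module that owns search().
    frames = []
    for frame_info in inspect.stack():
        frames.append({
            "function": frame_info.function,
            "file": frame_info.filename,
            "line": frame_info.lineno,
        })
    return frames
```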

@@ -7,6 +7,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -31,12 +33,21 @@ def title_search(title_search: str) -> int:
     """
     # Find new domain if prev dont work
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

     # Send request to search for title
-    response = httpx.get(f"https://{SITE_NAME}.{domain_to_use}/?story={unidecode(title_search.replace(' ', '+'))}&do=search&subaction=search&titleonly=3", headers={'user-agent': get_headers()})
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://{SITE_NAME}.{domain_to_use}/?story={unidecode(title_search.replace(' ', '+'))}&do=search&subaction=search&titleonly=3",
+            headers={'user-agent': get_headers()},
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")
     table_content = soup.find('div', id="dle-content")

@@ -21,6 +21,7 @@ from ..Class.WindowType import WindowVideo, WindowParameter, DynamicJSONConverte
 # Variable
 from ...costant import SITE_NAME
+max_timeout = config_manager.get_int("REQUESTS", "timeout")

 class VideoSource:
@@ -57,7 +58,11 @@ class VideoSource:
         """
         try:
-            response = httpx.get(f"https://www.{self.base_name}.{self.domain}/info_api/{self.media_id}/", headers=self.headers)
+            response = httpx.get(
+                url=f"https://www.{self.base_name}.{self.domain}/info_api/{self.media_id}/",
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Parse JSON response and return episode count
@@ -84,7 +89,12 @@ class VideoSource:
                 "end_range": index_ep + 1
             }

-            response = httpx.get(f"https://www.{self.base_name}.{self.domain}/info_api/{self.media_id}/{index_ep}", headers=self.headers, params=params, timeout=5)
+            response = httpx.get(
+                url=f"https://www.{self.base_name}.{self.domain}/info_api/{self.media_id}/{index_ep}",
+                headers=self.headers,
+                params=params,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Return information about the episode
@@ -107,7 +117,11 @@ class VideoSource:
         """
         try:
-            response = httpx.get(f"https://www.{self.base_name}.{self.domain}/embed-url/{episode_id}", headers=self.headers)
+            response = httpx.get(
+                url=f"https://www.{self.base_name}.{self.domain}/embed-url/{episode_id}",
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Extract and clean embed URL
@@ -182,8 +196,8 @@ class VideoSource:
             final_params["h"] = "1"

         # Construct the new query string and final URL
-        new_query = urlencode(final_params)  # Encode final_params into a query string
-        new_url = m._replace(query=new_query)  # Replace the old query string with the new one
-        final_url = urlunparse(new_url)  # Construct the final URL from the modified parts
+        new_query = urlencode(final_params)
+        new_url = m._replace(query=new_query)
+        final_url = urlunparse(new_url)

         return final_url
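The last hunk above only strips inline comments, but the three lines it touches are the whole URL-rewriting trick; here it is in isolation, using nothing beyond the standard library (the example URL is illustrative):

```python
from urllib.parse import urlencode, urlparse, urlunparse

m = urlparse("https://example.com/embed?id=1")      # split the URL into parts
final_params = {"id": "1", "h": "1"}                # tweaked query parameters
final_url = urlunparse(m._replace(query=urlencode(final_params)))
print(final_url)  # https://example.com/embed?id=1&h=1
```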

@@ -11,6 +11,7 @@ from .anime import download_film, download_series
 # Variable
 indice = 1
+_use_for = "anime"
 _deprecate = False

@@ -10,6 +10,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -99,7 +101,9 @@ def title_search(title: str) -> int:
     """
     # Get token and session value from configuration
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://www.{SITE_NAME}")
     data = get_token(SITE_NAME, domain_to_use)

     # Prepare cookies to be used in the request
@@ -121,9 +125,19 @@ def title_search(title: str) -> int:
     }

     # Send a POST request to the API endpoint for live search
-    response = httpx.post(f'https://www.{SITE_NAME}.{domain_to_use}/livesearch', cookies=cookies, headers=headers, json=json_data)
-    response.raise_for_status()
+    try:
+        response = httpx.post(
+            url=f'https://www.{SITE_NAME}.{domain_to_use}/livesearch',
+            cookies=cookies,
+            headers=headers,
+            json=json_data,
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Process each record returned in the response
     for dict_title in response.json()['records']:

@@ -11,6 +11,7 @@ from .title import download_title
 # Variable
 indice = 7
+_use_for = "film_serie"
 _deprecate = False

@@ -7,6 +7,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -34,12 +36,21 @@ def title_search(word_to_search: str) -> int:
     """
     # Find new domain if prev dont work
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

     # Construct the full site URL and load the search page
-    response = httpx.get(f"https://{SITE_NAME}.{domain_to_use}/search?q={unidecode(word_to_search)}&category=1&subcat=2&page=1", headers={'user-agent': get_headers()})
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://{SITE_NAME}.{domain_to_use}/search?q={unidecode(word_to_search)}&category=1&subcat=2&page=1",
+            headers={'user-agent': get_headers()},
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")

@@ -12,8 +12,12 @@ from bs4 import BeautifulSoup
 # Internal utilities
 from Src.Util.headers import get_headers
+from Src.Util._jsonConfig import config_manager
+
+# Variable
+max_timeout = config_manager.get_int("REQUESTS", "timeout")

 class VideoSource:
     def __init__(self, url: str):
         """
@@ -35,7 +39,12 @@ class VideoSource:
         try:
             # Send a GET request to the initial URL
-            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=10)
+            response = httpx.get(
+                url=self.url,
+                headers=self.headers,
+                follow_redirects=True,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Extract the redirect URL from the HTML
@@ -60,7 +69,12 @@ class VideoSource:
         try:
             # Send a GET request to the redirect URL
-            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=10)
+            response = httpx.get(
+                url=self.redirect_url,
+                headers=self.headers,
+                follow_redirects=True,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Extract the Maxstream URL from the HTML
@@ -79,7 +93,7 @@ class VideoSource:
                 # Make request to stayonline api
                 data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}

-                response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data)
+                response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data, timeout=max_timeout)
                 response.raise_for_status()
                 uprot_url = response.json()['data']['value']
@@ -112,7 +126,12 @@ class VideoSource:
         try:
             # Send a GET request to the Maxstream URL
-            response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=10)
+            response = httpx.get(
+                url=self.maxstream_url,
+                headers=self.headers,
+                follow_redirects=True,
+                timeout=max_timeout
+            )
             response.raise_for_status()
             soup = BeautifulSoup(response.text, "html.parser")

@@ -11,6 +11,7 @@ from .film import download_film
 # Variable
 indice = 9
+_use_for = "film"
 _deprecate = False

@@ -7,6 +7,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -34,12 +36,22 @@ def title_search(word_to_search: str) -> int:
     """
     # Find new domain if prev dont work
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

     # Send request to search for titles
-    response = httpx.get(f"https://{SITE_NAME}.{domain_to_use}/?s={unidecode(word_to_search)}", headers={'user-agent': get_headers()}, follow_redirects=True)
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://{SITE_NAME}.{domain_to_use}/?s={unidecode(word_to_search)}",
+            headers={'user-agent': get_headers()},
+            follow_redirects=True,
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")

@@ -9,11 +9,13 @@ from bs4 import BeautifulSoup
 # Internal utilities
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers

 # Variable
 from ..costant import COOKIE
+max_timeout = config_manager.get_int("REQUESTS", "timeout")

 class VideoSource:
@@ -44,7 +46,12 @@ class VideoSource:
         - str: The response content if successful, None otherwise.
         """
         try:
-            response = httpx.get(url, headers=self.headers, cookies=self.cookie)
+            response = httpx.get(
+                url=url,
+                headers=self.headers,
+                cookies=self.cookie,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             return response.text

@@ -14,6 +14,7 @@ from .series import download_thread
 # Variable
 indice = 3
+_use_for = "serie"
 _deprecate = False

@@ -11,6 +11,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -38,12 +40,21 @@ def title_search(word_to_search: str) -> int:
     """
     # Find new domain if prev dont work
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

     # Send request to search for titles
-    response = httpx.get(f"https://{SITE_NAME}.{domain_to_use}/search/?&q={unidecode(word_to_search)}&quick=1&type=videobox_video&nodes=11", headers={'user-agent': get_headers()})
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://{SITE_NAME}.{domain_to_use}/search/?&q={unidecode(word_to_search)}&quick=1&type=videobox_video&nodes=11",
+            headers={'user-agent': get_headers()},
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")
     table_content = soup.find('ol', class_="ipsStream")

@@ -11,9 +11,14 @@ from bs4 import BeautifulSoup
 # Internal utilities
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers

+# Variable
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
+

 class VideoSource:
     def __init__(self) -> None:
         """
@@ -45,7 +50,12 @@ class VideoSource:
         """
         try:
-            response = httpx.get(url, headers=self.headers, follow_redirects=True, timeout=10)
+            response = httpx.get(
+                url=url,
+                headers=self.headers,
+                follow_redirects=True,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             return response.text

@@ -11,6 +11,7 @@ from .series import download_series
 # Variable
 indice = 4
+_use_for = "serie"
 _deprecate = False

@@ -7,6 +7,8 @@ from unidecode import unidecode
 # Internal utilities
+from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -34,12 +36,21 @@ def title_search(word_to_search: str) -> int:
     """
     # Find new domain if prev dont work
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

     # Send request to search for titles
-    response = httpx.get(f"https://guardaserie.{domain_to_use}/?story={unidecode(word_to_search)}&do=search&subaction=search", headers={'user-agent': get_headers()}, timeout=15)
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://guardaserie.{domain_to_use}/?story={unidecode(word_to_search)}&do=search&subaction=search",
+            headers={'user-agent': get_headers()},
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")
     table_content = soup.find('div', class_="mlnew-list")

@@ -11,6 +11,7 @@ from .film import download_film
 # Variable
 indice = 9
+_use_for = "film"
 _deprecate = False

@@ -2,6 +2,7 @@
 import os
 import sys
+import time
 import logging
@@ -11,11 +12,12 @@ from bs4 import BeautifulSoup
 # Internal utilities
+from Src.Util.console import console, msg
 from Src.Util.message import start_message
-from Src.Util.console import console
-from Src.Util.os import can_create_file, remove_special_characters
+from Src.Util.call_stack import get_call_stack
 from Src.Util.headers import get_headers
 from Src.Lib.Downloader import HLS_Downloader
+from ..Template import execute_search

 # Logic class
@@ -60,19 +62,17 @@ def download_film(movie_details: Json_film):
     video_source.setup(supervideo_url)

     # Define output path
-    mp4_name = remove_special_characters(movie_details.title) + ".mp4"
-    mp4_path = os.path.join(ROOT_PATH, SITE_NAME, MOVIE_FOLDER, remove_special_characters(movie_details.title))
+    mp4_name = movie_details.title + ".mp4"
+    mp4_path = os.path.join(ROOT_PATH, SITE_NAME, MOVIE_FOLDER, movie_details.title)

-    # Check if the MP4 file can be created
-    if not can_create_file(mp4_name):
-        logging.error("Invalid mp4 name.")
-        sys.exit(0)

     # Get m3u8 master playlist
     master_playlist = video_source.get_playlist()

     # Download the film using the m3u8 playlist, and output filename
-    HLS_Downloader(
-        m3u8_playlist = master_playlist,
-        output_filename = os.path.join(mp4_path, mp4_name)
-    ).start()
+    if HLS_Downloader(m3u8_playlist = master_playlist, output_filename = os.path.join(mp4_path, mp4_name)).start() == 404:
+        time.sleep(2)
+
+        # Re call search function
+        if msg.ask("[green]Do you want to continue [white]([red]y[white])[green] or return at home[white]([red]n[white]) ", choices=['y', 'n'], default='y', show_choices=True) == "n":
+            frames = get_call_stack()
+            execute_search(frames[-4])

@@ -11,6 +11,7 @@ from bs4 import BeautifulSoup
 # Internal utilities
+from Src.Util._jsonConfig import config_manager
 from Src.Util.headers import get_headers
 from Src.Util.console import console, Panel
@@ -23,6 +24,7 @@ from ..Class.WindowType import WindowVideo, WindowParameter, DynamicJSONConverte
 # Variable
 from ...costant import SITE_NAME
+max_timeout = config_manager.get_int("REQUESTS", "timeout")

 class VideoSource:
@@ -66,7 +68,11 @@ class VideoSource:
         try:
-            response = httpx.get(f"https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}", headers=self.headers, timeout=15)
+            response = httpx.get(
+                url=f"https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}",
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Extract JSON response if available
@@ -90,7 +96,11 @@ class VideoSource:
         try:
             # Make a request to collect information about a specific season
-            response = httpx.get(f'https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}/stagione-{number_season}', headers=self.headers, timeout=15)
+            response = httpx.get(
+                url=f'https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}/stagione-{number_season}',
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Extract JSON response if available
@@ -122,7 +132,11 @@ class VideoSource:
         try:
             # Make a request to get iframe source
-            response = httpx.get(f"https://{self.base_name}.{self.domain}/iframe/{self.media_id}", params=params, timeout=15)
+            response = httpx.get(
+                url=f"https://{self.base_name}.{self.domain}/iframe/{self.media_id}",
+                params=params,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             # Parse response with BeautifulSoup to get iframe source
@@ -164,7 +178,11 @@ class VideoSource:
         # Make a request to get content
         try:
-            response = httpx.get(self.iframe_src, headers=self.headers, timeout=15)
+            response = httpx.get(
+                url=self.iframe_src,
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

         except Exception as e:
@@ -172,8 +190,6 @@ class VideoSource:
             console.print(Panel("[red bold]Coming soon", title="Notification", title_align="left", border_style="yellow"))
             sys.exit(0)

-        if response.status_code == 200:
-
         # Parse response with BeautifulSoup to get content
         soup = BeautifulSoup(response.text, "html.parser")
         script = soup.find("body").find("script").text

@@ -12,6 +12,7 @@ from .series import download_series
 # Variable
 indice = 0
+_use_for = "film_serie"
 _deprecate = False

@@ -12,8 +12,9 @@ from unidecode import unidecode
 # Internal utilities
-from Src.Util.headers import get_headers
 from Src.Util.console import console
+from Src.Util._jsonConfig import config_manager
+from Src.Util.headers import get_headers
 from Src.Util.table import TVShowManager
 from ..Template import search_domain, get_select_title
@@ -91,10 +92,20 @@ def title_search(title_search: str, domain: str) -> int:
         int: The number of titles found.
     """
+    max_timeout = config_manager.get_int("REQUESTS", "timeout")

     # Send request to search for titles ( replace à to a and space to "+" )
-    response = httpx.get(f"https://{SITE_NAME}.{domain}/api/search?q={unidecode(title_search.replace(' ', '+'))}", headers={'user-agent': get_headers()})
-    response.raise_for_status()
+    try:
+        response = httpx.get(
+            url=f"https://{SITE_NAME}.{domain}/api/search?q={unidecode(title_search.replace(' ', '+'))}",
+            headers={'user-agent': get_headers()},
+            timeout=max_timeout
+        )
+        response.raise_for_status()
+    except Exception as e:
+        console.print(f"Site: {SITE_NAME}, request search error: {e}")

     # Add found titles to media search manager
     for dict_title in response.json()['data']:
         media_search_manager.add_media({

@@ -55,6 +55,7 @@ FILTER_CUSTOM_REOLUTION = config_manager.get_int('M3U8_PARSER', 'force_resolutio
 # Variable
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
 headers_index = config_manager.get_dict('REQUESTS', 'user-agent')
 m3u8_url_fixer = M3U8_UrlFix()
@@ -97,7 +98,7 @@ class HttpClient:
         """
         self.headers = headers

-    def get(self, url: str, timeout: int=20):
+    def get(self, url: str):
         """
         Sends a GET request to the specified URL and returns the response as text.
@@ -105,7 +106,11 @@ class HttpClient:
             str: The response body as text if the request is successful, None otherwise.
         """
         try:
-            response = httpx.get(url, headers=self.headers, timeout=timeout)
+            response = httpx.get(
+                url=url,
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             return response.text  # Return the response text
@@ -114,7 +119,7 @@ class HttpClient:
             logging.error(f"Request to {url} failed: {response.status_code} when get text.")
             return 404

-    def get_content(self, url, timeout=20):
+    def get_content(self, url):
         """
         Sends a GET request to the specified URL and returns the raw response content.
@@ -122,7 +127,11 @@ class HttpClient:
             bytes: The response content as bytes if the request is successful, None otherwise.
         """
         try:
-            response = httpx.get(url, headers=self.headers, timeout=timeout)
+            response = httpx.get(
+                url=url,
+                headers=self.headers,
+                timeout=max_timeout
+            )
             response.raise_for_status()

             return response.content  # Return the raw response content
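With the `timeout` parameters gone, `HttpClient` callers can no longer override the config value per request; everything funnels through the module-level `max_timeout`. A usage sketch, assuming the constructor simply stores the headers as `self.headers` (it is not shown in this hunk):

```python
client = HttpClient(headers={"user-agent": "Mozilla/5.0"})

text = client.get("https://example.com/master.m3u8")          # str on success, 404 on HTTP error
data = client.get_content("https://example.com/segment.ts")   # raw bytes on success
```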

@@ -38,7 +38,6 @@ from .proxyes import main_test_proxy
 # Config
 TQDM_DELAY_WORKER = config_manager.get_float('M3U8_DOWNLOAD', 'tqdm_delay')
 TQDM_USE_LARGE_BAR = config_manager.get_int('M3U8_DOWNLOAD', 'tqdm_use_large_bar')
-REQUEST_TIMEOUT = config_manager.get_float('REQUESTS', 'timeout')
 REQUEST_MAX_RETRY = config_manager.get_int('REQUESTS', 'max_retry')
 REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify_ssl')
 THERE_IS_PROXY_LIST = check_file_existence("list_proxy.txt")
@@ -48,6 +47,7 @@ PROXY_START_MAX = config_manager.get_float('REQUESTS', 'proxy_start_max')
 # Variable
 headers_index = config_manager.get_dict('REQUESTS', 'user-agent')
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
@@ -98,7 +98,11 @@ class M3U8_Segments:
         # Make request to get porxy
         try:
-            response = httpx.get(key_uri, headers=headers_index)
+            response = httpx.get(
+                url=key_uri,
+                headers=headers_index,
+                timeout=max_timeout
+            )
             response.raise_for_status()

         except Exception as e:
@@ -214,16 +218,38 @@ class M3U8_Segments:
             with httpx.Client(proxies=proxy, verify=need_verify) as client:
                 if 'key_base_url' in self.__dict__:
-                    response = client.get(ts_url, headers=random_headers(self.key_base_url), timeout=REQUEST_TIMEOUT, follow_redirects=True)
+                    response = client.get(
+                        url=ts_url,
+                        headers=random_headers(self.key_base_url),
+                        timeout=max_timeout,
+                        follow_redirects=True
+                    )
                 else:
-                    response = client.get(ts_url, headers={'user-agent': get_headers()}, timeout=REQUEST_TIMEOUT, follow_redirects=True)
+                    response = client.get(
+                        url=ts_url,
+                        headers={'user-agent': get_headers()},
+                        timeout=max_timeout,
+                        follow_redirects=True
+                    )
         else:
             with httpx.Client(verify=need_verify) as client_2:
                 if 'key_base_url' in self.__dict__:
-                    response = client_2.get(ts_url, headers=random_headers(self.key_base_url), timeout=REQUEST_TIMEOUT, follow_redirects=True)
+                    response = client_2.get(
+                        url=ts_url,
+                        headers=random_headers(self.key_base_url),
+                        timeout=max_timeout,
+                        follow_redirects=True
+                    )
                 else:
-                    response = client_2.get(ts_url, headers={'user-agent': get_headers()}, timeout=REQUEST_TIMEOUT, follow_redirects=True)
+                    response = client_2.get(
+                        url=ts_url,
+                        headers={'user-agent': get_headers()},
+                        timeout=max_timeout,
+                        follow_redirects=True
+                    )

         # Get response content
         response.raise_for_status()  # Raise exception for HTTP errors
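The four `client.get(...)` branches above differ only in whether a proxy is used and which headers are sent. A possible consolidation, offered as a refactor sketch rather than anything in this commit (httpx accepts `proxies=None`, so one client construction covers both cases):

```python
def fetch_ts(ts_url, proxy=None, key_base_url=None, verify=True):
    # Same header choice as the four branches above.
    if key_base_url is not None:
        headers = random_headers(key_base_url)
    else:
        headers = {'user-agent': get_headers()}

    with httpx.Client(proxies=proxy, verify=verify) as client:
        response = client.get(ts_url, headers=headers, timeout=max_timeout, follow_redirects=True)
        response.raise_for_status()
        return response.content
```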

@@ -55,7 +55,7 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: str = Non
     # Make request to get content of video
     with httpx.Client(verify=REQUEST_VERIFY, timeout=REQUEST_TIMEOUT) as client:
-        with client.stream("GET", url, headers=headers, timeout=10) as response:
+        with client.stream("GET", url, headers=headers, timeout=REQUEST_TIMEOUT) as response:

             total = int(response.headers.get('content-length', 0))
             if total != 0:
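The fix above makes the per-request timeout agree with the client-level `REQUEST_TIMEOUT` instead of a hardcoded 10. For context, the streaming-download pattern in condensed form; this is an illustrative sketch, not the project's actual `MP4_downloader` (which also drives a progress bar):

```python
import httpx

def stream_to_file(url: str, path: str, timeout: float = 15.0) -> None:
    # Stream the body so large MP4s are never held fully in memory.
    with httpx.Client(timeout=timeout, follow_redirects=True) as client:
        with client.stream("GET", url) as response:
            response.raise_for_status()
            with open(path, "wb") as f:
                for chunk in response.iter_bytes():
                    f.write(chunk)
```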

@@ -17,7 +17,7 @@
         "show_trending": false
     },
     "REQUESTS": {
-        "timeout": 10,
+        "timeout": 15,
         "max_retry": 3,
         "verify_ssl": true,
         "user-agent": "",

run.py

@@ -42,7 +42,6 @@ def run_function(func: Callable[..., None], close_console: bool = False) -> None
-
 def load_search_functions():
     modules = []
     loaded_functions = {}
@@ -53,10 +52,8 @@ def load_search_functions():
     logging.info(f"Base folder path: {api_dir}")
     logging.info(f"Api module path: {init_files}")

-    # Retrieve modules and their indices
-
     for init_file in init_files:

         # Get folder name as module name
         module_name = os.path.basename(os.path.dirname(init_file))
         logging.info(f"Load module name: {module_name}")
@@ -64,14 +61,13 @@ def load_search_functions():
         try:
             # Dynamically import the module
             mod = importlib.import_module(f'Src.Api.{module_name}')

             # Get 'indice' from the module
             indice = getattr(mod, 'indice', 0)
             is_deprecate = bool(getattr(mod, '_deprecate', True))
+            use_for = getattr(mod, '_use_for', 'other')

-            # Add module and indice to the list
             if not is_deprecate:
-                modules.append((module_name, indice))
+                modules.append((module_name, indice, use_for))

         except Exception as e:
             console.print(f"[red]Failed to import module {module_name}: {str(e)}")
@@ -80,13 +76,14 @@ def load_search_functions():
     modules.sort(key=lambda x: x[1])

     # Load search functions in the sorted order
-    for module_name, _ in modules:
+    for module_name, _, use_for in modules:

         # Construct a unique alias for the module
         module_alias = f'{module_name}_search'
         logging.info(f"Module alias: {module_alias}")

         try:
             # Dynamically import the module
             mod = importlib.import_module(f'Src.Api.{module_name}')
@@ -94,7 +91,7 @@ def load_search_functions():
             search_function = getattr(mod, 'search')

             # Add the function to the loaded functions dictionary
-            loaded_functions[module_alias] = search_function
+            loaded_functions[module_alias] = (search_function, use_for)

         except Exception as e:
             console.print(f"[red]Failed to load search function from module {module_name}: {str(e)}")
@@ -149,17 +146,25 @@ def main():
     # Create dynamic argument parser
     parser = argparse.ArgumentParser(description='Script to download film and series from the internet.')

+    color_map = {
+        "anime": "red",
+        "film_serie": "yellow",
+        "film": "blue",
+        "serie": "green",
+        "other": "white"
+    }
+
     # Add dynamic arguments based on loaded search modules
-    for alias in search_functions.keys():
-        short_option = alias[:3].upper()    # Take the first three letters of the alias in uppercase
-        long_option = alias                 # Use the full alias as the full option name
+    for alias, (_, use_for) in search_functions.items():
+        short_option = alias[:3].upper()
+        long_option = alias
         parser.add_argument(f'-{short_option}', f'--{long_option}', action='store_true', help=f'Search for {alias.split("_")[0]} on streaming platforms.')

     # Parse command line arguments
     args = parser.parse_args()

     # Mapping command-line arguments to functions
-    arg_to_function = {alias: search_functions[alias] for alias in search_functions.keys()}
+    arg_to_function = {alias: func for alias, (func, _) in search_functions.items()}

     # Check which argument is provided and run the corresponding function
     for arg, func in arg_to_function.items():
@@ -168,14 +173,22 @@ def main():
             return

     # Mapping user input to functions
-    input_to_function = {str(i): search_functions[alias] for i, alias in enumerate(search_functions.keys())}
+    input_to_function = {str(i): func for i, (alias, (func, _)) in enumerate(search_functions.items())}

     # Create dynamic prompt message and choices
-    choice_labels = {str(i): alias.split("_")[0].capitalize() for i, alias in enumerate(search_functions.keys())}
-    prompt_message = f"[green]Insert category [white]({', '.join([f'[red]{key}: [magenta]{label}' for key, label in choice_labels.items()])}[white]): "
+    choice_labels = {str(i): (alias.split("_")[0].capitalize(), use_for) for i, (alias, (_, use_for)) in enumerate(search_functions.items())}
+
+    # Display the category legend in a single line
+    legend_text = " | ".join([f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items()])
+    console.print(f"[bold green]Category Legend:[/bold green] {legend_text}")
+
+    # Construct the prompt message with color-coded site names
+    prompt_message = "[green]Insert category [white](" + ", ".join(
+        [f"{key}: [{color_map[label[1]]}]{label[0]}[/{color_map[label[1]]}]" for key, label in choice_labels.items()]
+    ) + "[white])"

     # Ask the user for input
-    category = msg.ask(prompt_message, choices=list(choice_labels.keys()), default="0")
+    category = msg.ask(prompt_message, choices=list(choice_labels.keys()), default="0", show_choices=False, show_default=False)

     # Run the corresponding function based on user input
     if category in input_to_function: