From ded66f446e4c8c942fc18e7db801968927a84dc2 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 10:52:16 +0200 Subject: [PATCH 01/20] Remove database of domain --- .github/.domain/domain_update.py | 253 +++++++++++++++++++++++++ .github/.domain/domains.json | 62 ++++++ .gitignore | 1 - StreamingCommunity/Util/config_json.py | 33 ++-- 4 files changed, 331 insertions(+), 18 deletions(-) create mode 100644 .github/.domain/domain_update.py create mode 100644 .github/.domain/domains.json diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py new file mode 100644 index 0000000..e4be02d --- /dev/null +++ b/.github/.domain/domain_update.py @@ -0,0 +1,253 @@ +# 20.04.2024 + +import os +import re +import time +import json +from datetime import datetime +from urllib.parse import urlparse, urlunparse + + +import httpx +import ua_generator + + +JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json") + + +def load_domains(file_path): + if not os.path.exists(file_path): + print(f"Error: The file {file_path} was not found.") + return None + + try: + with open(file_path, 'r', encoding='utf-8') as f: + return json.load(f) + + except Exception as e: + print(f"Error reading the file {file_path}: {e}") + return None + +def save_domains(file_path, data): + try: + with open(file_path, 'w', encoding='utf-8') as f: + json.dump(data, f, indent=2, ensure_ascii=False) + print(f"Data successfully saved to {file_path}") + + except Exception as e: + print(f"Error saving the file {file_path}: {e}") + +def get_new_tld(full_url): + try: + parsed_url = urlparse(full_url) + hostname = parsed_url.hostname + if hostname: + parts = hostname.split('.') + return parts[-1] + + except Exception: + pass + + return None + +def try_url_with_retries(url_to_try, headers, timeout=15, retries=3, backoff_factor=0.5): + for attempt in range(retries): + try: + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as client: + response = client.get(url_to_try) + response.raise_for_status() + return response + + except (httpx.TimeoutException, httpx.ConnectError) as e: + print(f" [!] Attempt {attempt + 1}/{retries} for {url_to_try}: Network error ({type(e).__name__}). Retrying in {backoff_factor * (2 ** attempt)}s...") + if attempt + 1 == retries: + print(f" [!] Failed all {retries} attempts for {url_to_try} due to {type(e).__name__}.") + return None + time.sleep(backoff_factor * (2 ** attempt)) + + except httpx.HTTPStatusError as http_err: + if http_err.response.status_code in [403, 429, 503]: + print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. Suspected Cloudflare, checking for ...") + try: + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as cf_client: + cf_page_response = cf_client.get(url_to_try) + if cf_page_response.status_code != http_err.response.status_code and not (200 <= cf_page_response.status_code < 300) : + cf_page_response.raise_for_status() + + match = re.search(r': {base_href_url}") + try: + print(f" [] Attempting request to URL: {base_href_url}") + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as base_client: + final_response_from_base = base_client.get(base_href_url) + final_response_from_base.raise_for_status() + print(f" [+] Successfully fetched from URL.") + return final_response_from_base + + except httpx.RequestError as base_req_e: + print(f" [!] 
Error requesting URL {base_href_url}: {base_req_e}") + return None + + else: + print(f" [!] No found in page content for {url_to_try}.") + return None + + except httpx.RequestError as cf_req_e: + print(f" [!] Error fetching Cloudflare-like page content for {url_to_try}: {cf_req_e}") + return None + + else: + print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. No retry.") + return None + + except httpx.RequestError as e: + print(f" [!] Generic error for {url_to_try}: {e}. No retry.") + return None + + return None + + +def update_domain_entries(data): + if not data: + return False + + updated_count = 0 + + for key, entry in data.items(): + print(f"\n--- [DOMAIN] {key} ---") + original_full_url = entry.get("full_url") + original_domain_in_entry = entry.get("domain") + + if not original_full_url: + print(f" [!] 'full_url' missing. Skipped.") + continue + + ua = ua_generator.generate(device=('desktop', 'mobile'), browser=('chrome', 'edge', 'firefox', 'safari')) + current_headers = ua.headers.get() + + print(f" [] Stored URL: {original_full_url}") + if original_domain_in_entry: + print(f" [] Stored Domain (TLD): {original_domain_in_entry}") + + potential_urls_to_try = [] + potential_urls_to_try.append(("Original", original_full_url)) + + try: + parsed_original = urlparse(original_full_url) + + current_netloc = parsed_original.netloc + if current_netloc.startswith("www."): + varied_netloc = current_netloc[4:] + potential_urls_to_try.append(("Without www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) + else: + varied_netloc = "www." + current_netloc + potential_urls_to_try.append(("With www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) + + current_path = parsed_original.path + if not current_path: + potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path='/')))) + elif current_path.endswith('/'): + potential_urls_to_try.append(("Without trailing slash", urlunparse(parsed_original._replace(path=current_path[:-1])))) + else: + potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path=current_path + '/')))) + + except Exception as e: + print(f" [!] 
Error generating URL variations: {e}") + + entry_updated_in_this_run = False + + seen_urls_for_entry = set() + unique_potential_urls = [] + for label, url_val in potential_urls_to_try: + if url_val not in seen_urls_for_entry: + unique_potential_urls.append((label, url_val)) + seen_urls_for_entry.add(url_val) + + parsed_original_for_http_check = urlparse(original_full_url) + if parsed_original_for_http_check.scheme == 'https': + http_url = urlunparse(parsed_original_for_http_check._replace(scheme='http')) + if http_url not in seen_urls_for_entry: + unique_potential_urls.append(("HTTP Fallback", http_url)) + + for label, url_to_check in unique_potential_urls: + if entry_updated_in_this_run: + break + + print(f" [] Testing URL ({label}): {url_to_check}") + response = try_url_with_retries(url_to_check, current_headers) + + if response: + final_url_from_request = str(response.url) + print(f" [+] Redirect/Response to: {final_url_from_request}") + + parsed_final_url = urlparse(final_url_from_request) + normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) + if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): + normalized_full_url += '/' + + if normalized_full_url != final_url_from_request: + print(f" [+] Normalized URL: {normalized_full_url}") + + if normalized_full_url != original_full_url: + new_tld_val = get_new_tld(final_url_from_request) + + if new_tld_val: + entry["full_url"] = normalized_full_url + + if new_tld_val != original_domain_in_entry: + print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") + entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") + entry["domain"] = new_tld_val + entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") + + else: + entry["domain"] = new_tld_val + print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'") + + updated_count += 1 + entry_updated_in_this_run = True + + else: + print(f" [!] Could not extract TLD from {final_url_from_request}. URL not updated despite potential change.") + else: + if final_url_from_request != original_full_url: + print(f" [] Same Domain (after normalization): {final_url_from_request} -> {normalized_full_url}") + + else: + print(f" [] Same Domain: {final_url_from_request}") + + if label == "Original" or normalized_full_url == original_full_url : + entry_updated_in_this_run = True + + if not entry_updated_in_this_run: + print(f" [-] No Update for {key} after {len(unique_potential_urls)} attempts.") + + return updated_count > 0 + +def main(): + print("Starting domain update script...") + domain_data = load_domains(JSON_FILE_PATH) + + if domain_data: + if update_domain_entries(domain_data): + save_domains(JSON_FILE_PATH, domain_data) + print("\nUpdate complete. Some entries were modified.") + + else: + print("\nUpdate complete. 
No domains were modified.") + + else: + print("\nCannot proceed without domain data.") + + print("Script finished.") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json new file mode 100644 index 0000000..a7f588a --- /dev/null +++ b/.github/.domain/domains.json @@ -0,0 +1,62 @@ +{ + "1337xx": { + "domain": "to", + "full_url": "https://www.1337xx.to/", + "old_domain": "to", + "time_change": "2025-03-19 12:20:19" + }, + "cb01new": { + "domain": "download", + "full_url": "https://cb01net.download/", + "old_domain": "my", + "time_change": "2025-05-26 22:23:24" + }, + "animeunity": { + "domain": "so", + "full_url": "https://www.animeunity.so/", + "old_domain": "so", + "time_change": "2025-03-19 12:20:23" + }, + "animeworld": { + "domain": "ac", + "full_url": "https://www.animeworld.ac/", + "old_domain": "ac", + "time_change": "2025-03-21 12:20:27" + }, + "guardaserie": { + "domain": "meme", + "full_url": "https://guardaserie.meme/", + "old_domain": "meme", + "time_change": "2025-03-19 12:20:24" + }, + "ddlstreamitaly": { + "domain": "co", + "full_url": "https://ddlstreamitaly.co/", + "old_domain": "co", + "time_change": "2025-03-19 12:20:26" + }, + "streamingwatch": { + "domain": "org", + "full_url": "https://www.streamingwatch.org/", + "old_domain": "org", + "time_change": "2025-04-29 12:30:30" + }, + "altadefinizione": { + "domain": "spa", + "full_url": "https://altadefinizione.spa/", + "old_domain": "locker", + "time_change": "2025-05-26 23:22:45" + }, + "streamingcommunity": { + "domain": "blog", + "full_url": "https://streamingunity.blog/", + "old_domain": "to", + "time_change": "2025-05-31 10:45:55" + }, + "altadefinizionegratis": { + "domain": "icu", + "full_url": "https://altadefinizionegratis.icu/", + "old_domain": "taipei", + "time_change": "2025-05-18 11:21:05" + } +} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5cf4a3f..9322c75 100644 --- a/.gitignore +++ b/.gitignore @@ -52,5 +52,4 @@ cmd.txt bot_config.json scripts.json active_requests.json -domains.json working_proxies.json \ No newline at end of file diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py index 08070cd..62f68a4 100644 --- a/StreamingCommunity/Util/config_json.py +++ b/StreamingCommunity/Util/config_json.py @@ -268,33 +268,32 @@ class ConfigManager: self._load_site_data_from_file() def _load_site_data_from_api(self) -> None: - """Load site data from API.""" + """Load site data from GitHub.""" + domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json" headers = { - "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE", - "Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE", - "Content-Type": "application/json", - "User-Agent": get_userAgent() + "User-Agent": get_userAgent() } try: - console.print("[bold cyan]Retrieving site data from API...[/bold cyan]") - response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers) + console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] 
[green]{domains_github_url}[/green]") + response = requests.get(domains_github_url, timeout=8, headers=headers) if response.ok: - data = response.json() - if data and len(data) > 0: - self.configSite = data[0]['data'] - - site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0 - - else: - console.print("[bold yellow]API returned an empty data set[/bold yellow]") + self.configSite = response.json() + + site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0 + console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.") + else: - console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}") + console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}") self._handle_site_data_fallback() + except json.JSONDecodeError as e: + console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}") + self._handle_site_data_fallback() + except Exception as e: - console.print(f"[bold red]API connection error:[/bold red] {str(e)}") + console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}") self._handle_site_data_fallback() def _load_site_data_from_file(self) -> None: From 71e97c2c65ab52d9b5a89be3e7c8e5e2d8e279eb Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 10:58:12 +0200 Subject: [PATCH 02/20] Site: Update endpoint --- .github/.site/js/script.js | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/.github/.site/js/script.js b/.github/.site/js/script.js index e89eb9f..727e297 100644 --- a/.github/.site/js/script.js +++ b/.github/.site/js/script.js @@ -113,43 +113,27 @@ async function checkSiteStatus(url, siteName) { } } -const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co'; -const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE'; +const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json'; async function loadSiteData() { try { - console.log('Starting to load site data...'); + console.log('Starting to load site data from GitHub...'); createStatusIndicator(); - updateStatusIndicator('Loading...', 'Fetching site data from database...', 0); + updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0); const siteList = document.getElementById('site-list'); - const headers = { - 'accept': '*/*', - 'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7', - 'apikey': supabaseKey, - 'authorization': `Bearer ${supabaseKey}`, - 'content-type': 'application/json', - 'cache-control': 'no-cache', - 'pragma': 'no-cache', - 'range': '0-9' - }; - - console.log('Fetching from Supabase with headers:', headers); - const response = await fetch(`${supabaseUrl}/rest/v1/public?select=*`, { - method: 'GET', - headers: headers - }); + console.log(`Fetching from GitHub: ${domainsJsonUrl}`); + const response = await fetch(domainsJsonUrl); if (!response.ok) throw new Error(`HTTP error! 
Status: ${response.status}`); - const data = await response.json(); + const configSite = await response.json(); // Directly get the site data object siteList.innerHTML = ''; - if (data && data.length > 0) { - const configSite = data[0].data; + if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object totalSites = Object.keys(configSite).length; completedSites = 0; let latestUpdate = new Date(0); @@ -239,7 +223,7 @@ async function loadSiteData() { document.getElementById('last-update-time').textContent = formattedDate; } else { siteList.innerHTML = '
No sites available
'; - updateStatusIndicator('Ready', 'No sites found in database', 100); + updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100); } } catch (error) { console.error('Errore:', error); From 884bcf656cad7fbd104f10e95104fb6b9fb82ff8 Mon Sep 17 00:00:00 2001 From: None <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 10:59:11 +0200 Subject: [PATCH 03/20] Create update_domain.yml --- .github/workflows/update_domain.yml | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 .github/workflows/update_domain.yml diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml new file mode 100644 index 0000000..3d7a0bc --- /dev/null +++ b/.github/workflows/update_domain.yml @@ -0,0 +1,48 @@ +name: Aggiorna Domini Periodicamente + +on: + schedule: + - cron: "*/45 * * * *" + workflow_dispatch: + +jobs: + update-domains: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout del codice + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Installa dipendenze + run: pip install httpx ua-generator + + - name: Configura DNS + run: | + sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf' + sudo sh -c 'echo "nameserver 149.112.112.122" >> /etc/resolv.conf' + cat /etc/resolv.conf + + - name: Esegui lo script di aggiornamento domini + run: python domain_updater.py + + - name: Commit e Push delle modifiche (se presenti) + run: | + git config --global user.name 'github-actions[bot]' + git config --global user.email 'github-actions[bot]@users.noreply.github.com' + + # Controlla se domain.json è stato modificato + if ! git diff --quiet domain.json; then + git add domain.json + git commit -m "Aggiornamento automatico domini [skip ci]" + echo "Modifiche committate. Tentativo di push..." + git push + else + echo "Nessuna modifica a domain.json da committare." + fi From 1776538c6c13ece38971ff79a62fc0ca44214cdf Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 11:28:38 +0200 Subject: [PATCH 04/20] github: Update domains --- .github/.domain/domain_update.py | 310 ++++++++++++++-------------- .github/workflows/update_domain.yml | 39 ++-- 2 files changed, 180 insertions(+), 169 deletions(-) diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py index e4be02d..937661f 100644 --- a/.github/.domain/domain_update.py +++ b/.github/.domain/domain_update.py @@ -1,17 +1,14 @@ # 20.04.2024 -import os import re -import time +import os import json from datetime import datetime from urllib.parse import urlparse, urlunparse - import httpx import ua_generator - JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json") @@ -50,69 +47,137 @@ def get_new_tld(full_url): return None -def try_url_with_retries(url_to_try, headers, timeout=15, retries=3, backoff_factor=0.5): - for attempt in range(retries): - try: - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as client: - response = client.get(url_to_try) - response.raise_for_status() - return response - - except (httpx.TimeoutException, httpx.ConnectError) as e: - print(f" [!] Attempt {attempt + 1}/{retries} for {url_to_try}: Network error ({type(e).__name__}). Retrying in {backoff_factor * (2 ** attempt)}s...") - if attempt + 1 == retries: - print(f" [!] 
Failed all {retries} attempts for {url_to_try} due to {type(e).__name__}.") - return None - time.sleep(backoff_factor * (2 ** attempt)) +def extract_domain_from_response(response, original_url): + if 'location' in response.headers: + return response.headers['location'] + + if str(response.url) != original_url: + return str(response.url) + + try: + content_type = response.headers.get('content-type', '').lower() + if 'text/html' in content_type or 'text/plain' in content_type: + response_text = response.text - except httpx.HTTPStatusError as http_err: - if http_err.response.status_code in [403, 429, 503]: - print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. Suspected Cloudflare, checking for ...") - try: - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as cf_client: - cf_page_response = cf_client.get(url_to_try) - if cf_page_response.status_code != http_err.response.status_code and not (200 <= cf_page_response.status_code < 300) : - cf_page_response.raise_for_status() - - match = re.search(r': {base_href_url}") - try: - print(f" [] Attempting request to URL: {base_href_url}") - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as base_client: - final_response_from_base = base_client.get(base_href_url) - final_response_from_base.raise_for_status() - print(f" [+] Successfully fetched from URL.") - return final_response_from_base - - except httpx.RequestError as base_req_e: - print(f" [!] Error requesting URL {base_href_url}: {base_req_e}") - return None - - else: - print(f" [!] No found in page content for {url_to_try}.") - return None - - except httpx.RequestError as cf_req_e: - print(f" [!] Error fetching Cloudflare-like page content for {url_to_try}: {cf_req_e}") - return None - - else: - print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. No retry.") - return None + js_redirect_patterns = [ + r'window\.location\.href\s*=\s*["\']([^"\']+)["\']', + r'window\.location\s*=\s*["\']([^"\']+)["\']', + r'location\.href\s*=\s*["\']([^"\']+)["\']', + r'document\.location\s*=\s*["\']([^"\']+)["\']' + ] - except httpx.RequestError as e: - print(f" [!] Generic error for {url_to_try}: {e}. No retry.") - return None - + for pattern in js_redirect_patterns: + js_match = re.search(pattern, response_text, re.IGNORECASE) + if js_match: + return js_match.group(1) + + meta_patterns = [ + r']*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)', + r']*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?' + ] + + for pattern in meta_patterns: + meta_match = re.search(pattern, response_text, re.IGNORECASE) + if meta_match: + return meta_match.group(1) + + canonical_match = re.search(r']*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE) + if canonical_match: + return canonical_match.group(1) + + base_match = re.search(r']*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE) + if base_match: + return base_match.group(1) + + error_redirect_patterns = [ + r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)', + r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)', + r'[Mm]oved\s+to:?\s*([^\s<>"\']+)', + r'[Ff]ound\s+at:?\s*([^\s<>"\']+)' + ] + + for pattern in error_redirect_patterns: + error_match = re.search(pattern, response_text) + if error_match: + potential_url = error_match.group(1) + if potential_url.startswith(('http://', 'https://', '//')): + return potential_url + + except Exception as e: + print(f" [!] 
Error extracting from response content: {e}") + return None +def try_url(url_to_try, headers, timeout=15): + try: + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as client: + response = client.get(url_to_try) + + if response.status_code in [301, 302, 303, 307, 308]: + location = response.headers.get('location') + if location: + print(f" [+] Found redirect ({response.status_code}) to: {location}") + try: + final_response = client.get(location) + if 200 <= final_response.status_code < 400: + return final_response + else: + return httpx.Response( + status_code=200, + headers={"location": location}, + content=b"", + request=response.request + ) + except Exception: + return httpx.Response( + status_code=200, + headers={"location": location}, + content=b"", + request=response.request + ) + + elif response.status_code in [403, 409, 429, 503]: + print(f" [!] HTTP {response.status_code} - attempting to extract redirect info") + + location = response.headers.get('location') + if location: + print(f" [+] Found location header in error response: {location}") + return httpx.Response( + status_code=200, + headers={"location": location}, + content=b"", + request=response.request + ) + + new_url = extract_domain_from_response(response, url_to_try) + if new_url and new_url != url_to_try: + print(f" [+] Found redirect URL in error response content: {new_url}") + return httpx.Response( + status_code=200, + headers={"location": new_url}, + content=b"", + request=response.request + ) + + if 200 <= response.status_code < 400: + return response + + print(f" [!] HTTP {response.status_code} for {url_to_try}") + + except httpx.HTTPStatusError as http_err: + new_url = extract_domain_from_response(http_err.response, url_to_try) + if new_url: + print(f" [+] Found new URL from HTTPStatusError response: {new_url}") + return httpx.Response( + status_code=200, + headers={"location": new_url}, + content=b"", + request=http_err.request + ) + except Exception as e: + print(f" [!] Error for {url_to_try}: {type(e).__name__}") + + return None def update_domain_entries(data): if not data: @@ -135,100 +200,47 @@ def update_domain_entries(data): print(f" [] Stored URL: {original_full_url}") if original_domain_in_entry: print(f" [] Stored Domain (TLD): {original_domain_in_entry}") - - potential_urls_to_try = [] - potential_urls_to_try.append(("Original", original_full_url)) - - try: - parsed_original = urlparse(original_full_url) - - current_netloc = parsed_original.netloc - if current_netloc.startswith("www."): - varied_netloc = current_netloc[4:] - potential_urls_to_try.append(("Without www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) - else: - varied_netloc = "www." + current_netloc - potential_urls_to_try.append(("With www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) - - current_path = parsed_original.path - if not current_path: - potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path='/')))) - elif current_path.endswith('/'): - potential_urls_to_try.append(("Without trailing slash", urlunparse(parsed_original._replace(path=current_path[:-1])))) - else: - potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path=current_path + '/')))) - - except Exception as e: - print(f" [!] 
Error generating URL variations: {e}") - - entry_updated_in_this_run = False - seen_urls_for_entry = set() - unique_potential_urls = [] - for label, url_val in potential_urls_to_try: - if url_val not in seen_urls_for_entry: - unique_potential_urls.append((label, url_val)) - seen_urls_for_entry.add(url_val) - - parsed_original_for_http_check = urlparse(original_full_url) - if parsed_original_for_http_check.scheme == 'https': - http_url = urlunparse(parsed_original_for_http_check._replace(scheme='http')) - if http_url not in seen_urls_for_entry: - unique_potential_urls.append(("HTTP Fallback", http_url)) + print(f" [] Testing URL: {original_full_url}") + response = try_url(original_full_url, current_headers) - for label, url_to_check in unique_potential_urls: - if entry_updated_in_this_run: - break + if response: + final_url_from_request = str(response.url) + print(f" [+] Redirect/Response to: {final_url_from_request}") + + parsed_final_url = urlparse(final_url_from_request) + normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) + if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): + normalized_full_url += '/' - print(f" [] Testing URL ({label}): {url_to_check}") - response = try_url_with_retries(url_to_check, current_headers) + if normalized_full_url != final_url_from_request: + print(f" [+] Normalized URL: {normalized_full_url}") - if response: - final_url_from_request = str(response.url) - print(f" [+] Redirect/Response to: {final_url_from_request}") - - parsed_final_url = urlparse(final_url_from_request) - normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) - if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): - normalized_full_url += '/' + if normalized_full_url != original_full_url: + new_tld_val = get_new_tld(final_url_from_request) - if normalized_full_url != final_url_from_request: - print(f" [+] Normalized URL: {normalized_full_url}") - - if normalized_full_url != original_full_url: - new_tld_val = get_new_tld(final_url_from_request) + if new_tld_val: + entry["full_url"] = normalized_full_url - if new_tld_val: - entry["full_url"] = normalized_full_url - - if new_tld_val != original_domain_in_entry: - print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") - entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") - entry["domain"] = new_tld_val - entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") - - else: - entry["domain"] = new_tld_val - print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'") - - updated_count += 1 - entry_updated_in_this_run = True - + if new_tld_val != original_domain_in_entry: + print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") + entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") + entry["domain"] = new_tld_val + entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") else: - print(f" [!] Could not extract TLD from {final_url_from_request}. 
URL not updated despite potential change.") + entry["domain"] = new_tld_val + print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'") + + updated_count += 1 + else: - if final_url_from_request != original_full_url: - print(f" [] Same Domain (after normalization): {final_url_from_request} -> {normalized_full_url}") + print(f" [!] Could not extract TLD from {final_url_from_request}. URL not updated despite potential change.") + else: + print(f" [] Same Domain: {final_url_from_request}") - else: - print(f" [] Same Domain: {final_url_from_request}") - - if label == "Original" or normalized_full_url == original_full_url : - entry_updated_in_this_run = True - - if not entry_updated_in_this_run: - print(f" [-] No Update for {key} after {len(unique_potential_urls)} attempts.") + else: + print(f" [-] No response for {key}") return updated_count > 0 @@ -240,10 +252,8 @@ def main(): if update_domain_entries(domain_data): save_domains(JSON_FILE_PATH, domain_data) print("\nUpdate complete. Some entries were modified.") - else: print("\nUpdate complete. No domains were modified.") - else: print("\nCannot proceed without domain data.") diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml index 3d7a0bc..231c795 100644 --- a/.github/workflows/update_domain.yml +++ b/.github/workflows/update_domain.yml @@ -1,8 +1,8 @@ -name: Aggiorna Domini Periodicamente +name: Update domains on: schedule: - - cron: "*/45 * * * *" + - cron: "0 */2 * * *" workflow_dispatch: jobs: @@ -12,37 +12,38 @@ jobs: contents: write steps: - - name: Checkout del codice + - name: Checkout code uses: actions/checkout@v4 - name: Setup Python uses: actions/setup-python@v5 with: - python-version: '3.12' + python-version: '3.12' + + - name: Install dependencies + run: | + pip install httpx ua-generator requests + pip install --upgrade pip setuptools wheel - - name: Installa dipendenze - run: pip install httpx ua-generator - - - name: Configura DNS + - name: Configure DNS run: | sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf' - sudo sh -c 'echo "nameserver 149.112.112.122" >> /etc/resolv.conf' cat /etc/resolv.conf - - name: Esegui lo script di aggiornamento domini - run: python domain_updater.py + - name: Execute domain update script + run: python .github/.domain/domain_update.py - - name: Commit e Push delle modifiche (se presenti) + - name: Commit and push changes (if any) run: | git config --global user.name 'github-actions[bot]' git config --global user.email 'github-actions[bot]@users.noreply.github.com' - # Controlla se domain.json è stato modificato - if ! git diff --quiet domain.json; then - git add domain.json - git commit -m "Aggiornamento automatico domini [skip ci]" - echo "Modifiche committate. Tentativo di push..." + # Check if domains.json was modified + if ! git diff --quiet .github/.domain/domains.json; then + git add .github/.domain/domains.json + git commit -m "Automatic domain update [skip ci]" + echo "Changes committed. Attempting to push..." git push else - echo "Nessuna modifica a domain.json da committare." - fi + echo "No changes to .github/.domain/domains.json to commit." 
+ fi \ No newline at end of file From 73cc2662b80a2852292d75f90f78ea002106fa3c Mon Sep 17 00:00:00 2001 From: Alessandro Perazzetta <482310+AlessandroPerazzetta@users.noreply.github.com> Date: Sat, 31 May 2025 11:30:59 +0200 Subject: [PATCH 05/20] Dns check refactor (#328) * refactor: streamline proxy checking in search function * refactor: update DNS check method, try a real dns resolution instead of checking dns provider * refactor: enhance DNS resolution check to support multiple domains across platforms * refactor: replace os.socket with socket for DNS resolution consistency --------- Co-authored-by: None <62809003+Arrowar@users.noreply.github.com> --- StreamingCommunity/Util/os.py | 74 +++++++++++++++++++++++------------ StreamingCommunity/run.py | 17 ++++++-- 2 files changed, 63 insertions(+), 28 deletions(-) diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py index 5e35f03..2d8f7d1 100644 --- a/StreamingCommunity/Util/os.py +++ b/StreamingCommunity/Util/os.py @@ -12,7 +12,7 @@ import inspect import subprocess import contextlib import importlib.metadata - +import socket # External library from unidecode import unidecode @@ -283,37 +283,61 @@ class InternManager(): else: return f"{bytes / (1024 * 1024):.2f} MB/s" - def check_dns_provider(self): + # def check_dns_provider(self): + # """ + # Check if the system's current DNS server matches any known DNS providers. + + # Returns: + # bool: True if the current DNS server matches a known provider, + # False if no match is found or in case of errors + # """ + # dns_providers = { + # "Cloudflare": ["1.1.1.1", "1.0.0.1"], + # "Google": ["8.8.8.8", "8.8.4.4"], + # "OpenDNS": ["208.67.222.222", "208.67.220.220"], + # "Quad9": ["9.9.9.9", "149.112.112.112"], + # "AdGuard": ["94.140.14.14", "94.140.15.15"], + # "Comodo": ["8.26.56.26", "8.20.247.20"], + # "Level3": ["209.244.0.3", "209.244.0.4"], + # "Norton": ["199.85.126.10", "199.85.127.10"], + # "CleanBrowsing": ["185.228.168.9", "185.228.169.9"], + # "Yandex": ["77.88.8.8", "77.88.8.1"] + # } + + # try: + # resolver = dns.resolver.Resolver() + # nameservers = resolver.nameservers + + # if not nameservers: + # return False + + # for server in nameservers: + # for provider, ips in dns_providers.items(): + # if server in ips: + # return True + # return False + + # except Exception: + # return False + + def check_dns_resolve(self): """ - Check if the system's current DNS server matches any known DNS providers. + Check if the system's current DNS server can resolve a domain name. + Works on both Windows and Unix-like systems. 
Returns: - bool: True if the current DNS server matches a known provider, - False if no match is found or in case of errors + bool: True if the current DNS server can resolve a domain name, + False if can't resolve or in case of errors """ - dns_providers = { - "Cloudflare": ["1.1.1.1", "1.0.0.1"], - "Google": ["8.8.8.8", "8.8.4.4"], - "OpenDNS": ["208.67.222.222", "208.67.220.220"], - "Quad9": ["9.9.9.9", "149.112.112.112"], - } + test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"] try: - resolver = dns.resolver.Resolver() - nameservers = resolver.nameservers - - if not nameservers: - return False - - for server in nameservers: - for provider, ips in dns_providers.items(): - if server in ips: - return True + for domain in test_domains: + # socket.gethostbyname() works consistently across all platforms + socket.gethostbyname(domain) + return True + except (socket.gaierror, socket.error): return False - - except Exception: - return False - class OsSummary: def __init__(self): diff --git a/StreamingCommunity/run.py b/StreamingCommunity/run.py index e37bd74..2db8a86 100644 --- a/StreamingCommunity/run.py +++ b/StreamingCommunity/run.py @@ -210,7 +210,19 @@ def main(script_id = 0): log_not = Logger() initialize() - if not internet_manager.check_dns_provider(): + # if not internet_manager.check_dns_provider(): + # print() + # console.print("[red]❌ ERROR: DNS configuration is required!") + # console.print("[red]The program cannot function correctly without proper DNS settings.") + # console.print("[yellow]Please configure one of these DNS servers:") + # console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'") + # console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'") + # console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.") + + # time.sleep(2) + # msg.ask("[yellow]Press Enter to continue ...") + + if not internet_manager.check_dns_resolve(): print() console.print("[red]❌ ERROR: DNS configuration is required!") console.print("[red]The program cannot function correctly without proper DNS settings.") @@ -219,8 +231,7 @@ def main(script_id = 0): console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'") console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.") - time.sleep(2) - msg.ask("[yellow]Press Enter to continue ...") + os._exit(0) # Load search functions search_functions = load_search_functions() From 4b40b8ce225b3ded1c670d171d87f2268de5d4e9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 31 May 2025 12:17:33 +0000 Subject: [PATCH 06/20] Automatic domain update [skip ci] --- .github/.domain/domains.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index a7f588a..f3ae14d 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -48,10 +48,10 @@ "time_change": "2025-05-26 23:22:45" }, "streamingcommunity": { - "domain": "blog", - "full_url": "https://streamingunity.blog/", - "old_domain": "to", - "time_change": "2025-05-31 10:45:55" + "domain": "bio", + "full_url": "https://streamingunity.bio/", + "old_domain": "blog", + "time_change": "2025-05-31 12:17:33" }, "altadefinizionegratis": { "domain": "icu", From a45fd0d37e85173d79e4486cc5a83b2fe96166c8 Mon Sep 17 00:00:00 2001 From: Alessandro Perazzetta 
<482310+AlessandroPerazzetta@users.noreply.github.com> Date: Sat, 31 May 2025 20:07:30 +0200 Subject: [PATCH 07/20] Dns check (#332) * refactor: streamline proxy checking in search function * refactor: update DNS check method, try a real dns resolution instead of checking dns provider * refactor: enhance DNS resolution check to support multiple domains across platforms * refactor: replace os.socket with socket for DNS resolution consistency --------- Co-authored-by: None <62809003+Arrowar@users.noreply.github.com> From b8e28a30c0a58ff74e7fbfab03cf03810421cd90 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 1 Jun 2025 01:02:20 +0000 Subject: [PATCH 08/20] Automatic domain update [skip ci] --- .github/.domain/domains.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index f3ae14d..7cd57d0 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -6,10 +6,10 @@ "time_change": "2025-03-19 12:20:19" }, "cb01new": { - "domain": "download", - "full_url": "https://cb01net.download/", - "old_domain": "my", - "time_change": "2025-05-26 22:23:24" + "domain": "life", + "full_url": "https://cb01net.life/", + "old_domain": "download", + "time_change": "2025-06-01 01:02:16" }, "animeunity": { "domain": "so", From 6713de4ecc478209c4766635cf0e20b3c609383d Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sun, 1 Jun 2025 16:31:24 +0200 Subject: [PATCH 09/20] Bump v3.0.9 --- .github/.domain/domain_update.py | 111 +++++++++++++++++++++++++-- .github/workflows/update_domain.yml | 2 +- StreamingCommunity/Upload/version.py | 2 +- setup.py | 2 +- 4 files changed, 106 insertions(+), 11 deletions(-) diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py index 937661f..52c2ded 100644 --- a/.github/.domain/domain_update.py +++ b/.github/.domain/domain_update.py @@ -1,5 +1,3 @@ -# 20.04.2024 - import re import os import json @@ -47,6 +45,90 @@ def get_new_tld(full_url): return None +def get_enhanced_headers(): + ua = ua_generator.generate(device='desktop', browser='chrome') + headers = ua.headers.get() + + additional_headers = { + 'DNT': '1', + 'Upgrade-Insecure-Requests': '1', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', + 'Accept-Language': 'en-US,en;q=0.9,it;q=0.8', + 'Accept-Encoding': 'gzip, deflate, br', + 'Cache-Control': 'max-age=0', + 'Connection': 'keep-alive', + 'Referer': 'https://www.google.com/', + } + + headers.update(additional_headers) + return headers + +def extract_redirect_from_403(response, original_url): + redirect_headers = ['location', 'refresh', 'x-redirect-to', 'x-location', 'redirect'] + for header in redirect_headers: + if header in response.headers: + return response.headers[header] + + try: + content = response.text + + js_patterns = [ + r'window\.location\.href\s*=\s*["\']([^"\']+)["\']', + r'window\.location\s*=\s*["\']([^"\']+)["\']', + r'location\.href\s*=\s*["\']([^"\']+)["\']', + r'document\.location\s*=\s*["\']([^"\']+)["\']', + r'top\.location\.href\s*=\s*["\']([^"\']+)["\']', + r'parent\.location\s*=\s*["\']([^"\']+)["\']' + ] + + for pattern in js_patterns: + match = re.search(pattern, content, re.IGNORECASE) + if match: + return match.group(1) + + meta_patterns = [ + r']*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)', + r']*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?' 
+ ] + + for pattern in meta_patterns: + match = re.search(pattern, content, re.IGNORECASE) + if match: + return match.group(1) + + text_patterns = [ + r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)', + r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)', + r'[Mm]oved\s+to:?\s*([^\s<>"\']+)', + r'[Ff]ound\s+at:?\s*([^\s<>"\']+)', + r'[Gg]o\s+to:?\s*([^\s<>"\']+)', + r'[Vv]isit:?\s*([^\s<>"\']+)', + r'https?://[^\s<>"\']+\.[a-z]{2,}[^\s<>"\']*' + ] + + for pattern in text_patterns: + match = re.search(pattern, content) + if match: + potential_url = match.group(1) if '(' in pattern else match.group(0) + if potential_url.startswith(('http://', 'https://', '//')): + return potential_url + + link_patterns = [ + r']*href=["\']([^"\']+)["\'][^>]*>(?:click here|continue|proceed|go here)', + r']*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', + r']*href=["\']([^"\']+)["\']' + ] + + for pattern in link_patterns: + match = re.search(pattern, content, re.IGNORECASE) + if match: + return match.group(1) + + except Exception: + pass + + return None + def extract_domain_from_response(response, original_url): if 'location' in response.headers: return response.headers['location'] @@ -108,7 +190,10 @@ def extract_domain_from_response(response, original_url): return None -def try_url(url_to_try, headers, timeout=15): +def try_url(url_to_try, headers=None, timeout=15): + if headers is None: + headers = get_enhanced_headers() + try: with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as client: response = client.get(url_to_try) @@ -136,7 +221,20 @@ def try_url(url_to_try, headers, timeout=15): request=response.request ) - elif response.status_code in [403, 409, 429, 503]: + elif response.status_code == 403: + print(f" [!] HTTP 403 - attempting enhanced extraction") + + redirect_url = extract_redirect_from_403(response, url_to_try) + if redirect_url: + print(f" [+] Found redirect URL in 403 response: {redirect_url}") + return httpx.Response( + status_code=200, + headers={"location": redirect_url}, + content=b"", + request=response.request + ) + + elif response.status_code in [409, 429, 503]: print(f" [!] HTTP {response.status_code} - attempting to extract redirect info") location = response.headers.get('location') @@ -194,15 +292,12 @@ def update_domain_entries(data): print(f" [!] 'full_url' missing. 
Skipped.") continue - ua = ua_generator.generate(device=('desktop', 'mobile'), browser=('chrome', 'edge', 'firefox', 'safari')) - current_headers = ua.headers.get() - print(f" [] Stored URL: {original_full_url}") if original_domain_in_entry: print(f" [] Stored Domain (TLD): {original_domain_in_entry}") print(f" [] Testing URL: {original_full_url}") - response = try_url(original_full_url, current_headers) + response = try_url(original_full_url) if response: final_url_from_request = str(response.url) diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml index 231c795..205596d 100644 --- a/.github/workflows/update_domain.yml +++ b/.github/workflows/update_domain.yml @@ -2,7 +2,7 @@ name: Update domains on: schedule: - - cron: "0 */2 * * *" + - cron: "0 */3 * * *" workflow_dispatch: jobs: diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py index 535de5b..da2e3af 100644 --- a/StreamingCommunity/Upload/version.py +++ b/StreamingCommunity/Upload/version.py @@ -1,5 +1,5 @@ __title__ = 'StreamingCommunity' -__version__ = '3.0.8' +__version__ = '3.0.9' __author__ = 'Arrowar' __description__ = 'A command-line program to download film' __copyright__ = 'Copyright 2024' diff --git a/setup.py b/setup.py index 1fe021a..760cae5 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", enco setup( name="StreamingCommunity", - version="3.0.8", + version="3.0.9", long_description=read_readme(), long_description_content_type="text/markdown", author="Lovi-0", From d0207b366994349f92378e3ab2ec646c948acc71 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Mon, 2 Jun 2025 11:08:46 +0200 Subject: [PATCH 10/20] Fix wrong version pip --- .github/.domain/domain_update.py | 634 ++++++++++++++------------- .github/.domain/domains.json | 8 +- StreamingCommunity/Upload/update.py | 9 +- StreamingCommunity/Upload/version.py | 2 +- setup.py | 17 +- 5 files changed, 345 insertions(+), 325 deletions(-) diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py index 52c2ded..2c69a48 100644 --- a/.github/.domain/domain_update.py +++ b/.github/.domain/domain_update.py @@ -1,358 +1,360 @@ -import re +# 20.04.2024 + import os import json from datetime import datetime -from urllib.parse import urlparse, urlunparse +from urllib.parse import urlparse, unquote + +# External libraries import httpx +import tldextract import ua_generator - -JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json") +import dns.resolver -def load_domains(file_path): +# Variables +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +JSON_FILE_PATH = os.path.join(SCRIPT_DIR, "domains.json") +ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge')) + + +def get_headers(): + return ua.headers.get() + +def get_tld(url_str): + try: + parsed = urlparse(unquote(url_str)) + domain = parsed.netloc.lower().lstrip('www.') + parts = domain.split('.') + return parts[-1] if len(parts) >= 2 else None + + except Exception: + return None + +def get_base_domain(url_str): + try: + parsed = urlparse(url_str) + domain = parsed.netloc.lower().lstrip('www.') + parts = domain.split('.') + return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0] + + except Exception: + return None + +def get_base_url(url_str): + try: + parsed = urlparse(url_str) + return f"{parsed.scheme}://{parsed.netloc}" + + except Exception: + return None + +def log(msg, 
level='INFO'): + levels = { + 'INFO': '[ ]', + 'SUCCESS': '[+]', + 'WARNING': '[!]', + 'ERROR': '[-]' + } + entry = f"{levels.get(level, '[?]')} {msg}" + print(entry) + +def load_json_data(file_path): if not os.path.exists(file_path): - print(f"Error: The file {file_path} was not found.") + log(f"Error: The file {file_path} was not found.", "ERROR") return None try: with open(file_path, 'r', encoding='utf-8') as f: return json.load(f) - + except Exception as e: - print(f"Error reading the file {file_path}: {e}") + log(f"Error reading the file {file_path}: {e}", "ERROR") return None -def save_domains(file_path, data): +def save_json_data(file_path, data): try: with open(file_path, 'w', encoding='utf-8') as f: json.dump(data, f, indent=2, ensure_ascii=False) - print(f"Data successfully saved to {file_path}") + log(f"Data successfully saved to {file_path}", "SUCCESS") except Exception as e: - print(f"Error saving the file {file_path}: {e}") + log(f"Error saving the file {file_path}: {e}", "ERROR") + +def parse_url(url): + if not url.startswith(('http://', 'https://')): + url = 'https://' + url -def get_new_tld(full_url): try: - parsed_url = urlparse(full_url) - hostname = parsed_url.hostname - if hostname: - parts = hostname.split('.') - return parts[-1] - - except Exception: - pass - - return None - -def get_enhanced_headers(): - ua = ua_generator.generate(device='desktop', browser='chrome') - headers = ua.headers.get() + extracted = tldextract.extract(url) + parsed = urlparse(url) + clean_url = f"{parsed.scheme}://{parsed.netloc}/" + full_domain = f"{extracted.domain}.{extracted.suffix}" if extracted.domain else extracted.suffix + domain_tld = extracted.suffix + result = { + 'url': clean_url, + 'full_domain': full_domain, + 'domain': domain_tld, + 'suffix': extracted.suffix, + 'subdomain': extracted.subdomain or None + } + return result - additional_headers = { - 'DNT': '1', - 'Upgrade-Insecure-Requests': '1', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', - 'Accept-Language': 'en-US,en;q=0.9,it;q=0.8', - 'Accept-Encoding': 'gzip, deflate, br', - 'Cache-Control': 'max-age=0', - 'Connection': 'keep-alive', - 'Referer': 'https://www.google.com/', + except Exception as e: + log(f"Error parsing URL: {e}", "ERROR") + return None + +def check_dns_resolution(domain): + try: + resolver = dns.resolver.Resolver() + resolver.timeout = 2 + resolver.lifetime = 2 + + try: + answers = resolver.resolve(domain, 'A') + return str(answers[0]) + except: + try: + answers = resolver.resolve(domain, 'AAAA') + return str(answers[0]) + except: + pass + return None + except: + return None + +def find_new_domain(input_url, output_file=None, verbose=True, json_output=False): + log_buffer = [] + original_info = parse_url(input_url) + + if not original_info: + log(f"Could not parse original URL: {input_url}", "ERROR") + if json_output: + return {'full_url': input_url, 'domain': None} + return None + + log(f"Starting analysis for: {original_info['full_domain']}") + orig_ip = check_dns_resolution(original_info['full_domain']) + if orig_ip: + log(f"Original domain resolves to: {orig_ip}", "SUCCESS") + else: + log(f"Original domain does not resolve to an IP address", "WARNING") + + headers = get_headers() + new_domains = [] + redirects = [] + final_url = None + final_domain_info = None + url_to_test_in_loop = None + + for protocol in ['https://', 'http://']: + try: + url_to_test_in_loop = f"{protocol}{original_info['full_domain']}" + log(f"Testing connectivity to 
{url_to_test_in_loop}") + redirect_chain = [] + current_url = url_to_test_in_loop + max_redirects = 10 + redirect_count = 0 + + while redirect_count < max_redirects: + with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client: + response = client.get(current_url, headers=headers) + + redirect_info = {'url': current_url, 'status_code': response.status_code} + redirect_chain.append(redirect_info) + log(f"Request to {current_url} - Status: {response.status_code}") + + if response.status_code in (301, 302, 303, 307, 308): + if 'location' in response.headers: + next_url = response.headers['location'] + if next_url.startswith('/'): + parsed_current = urlparse(current_url) + next_url = f"{parsed_current.scheme}://{parsed_current.netloc}{next_url}" + + log(f"Redirect found: {next_url} (Status: {response.status_code})") + current_url = next_url + redirect_count += 1 + redirect_domain_info_val = parse_url(next_url) + if redirect_domain_info_val and redirect_domain_info_val['full_domain'] != original_info['full_domain']: + new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'}) + + else: + log(f"Redirect status code but no Location header", "WARNING") + break + else: + break + + if redirect_chain: + final_url = redirect_chain[-1]['url'] + final_domain_info = parse_url(final_url) + redirects.extend(redirect_chain) + log(f"Final URL after redirects: {final_url}", "SUCCESS") + if final_domain_info and final_domain_info['full_domain'] != original_info['full_domain']: + new_domains.append({'domain': final_domain_info['full_domain'], 'url': final_url, 'source': 'final_url'}) + + final_status = redirect_chain[-1]['status_code'] if redirect_chain else None + + if final_status and final_status < 400 and final_status != 403: + break + + if final_status == 403 and redirect_chain and len(redirect_chain) > 1: + log(f"Got 403 Forbidden, but captured {len(redirect_chain)-1} redirects before that", "SUCCESS") + break + + except httpx.RequestError as e: + log(f"Error connecting to {protocol}{original_info['full_domain']}: {str(e)}", "ERROR") + + url_for_auto_redirect = input_url + if url_to_test_in_loop: + url_for_auto_redirect = url_to_test_in_loop + elif original_info and original_info.get('url'): + url_for_auto_redirect = original_info['url'] + + if not redirects or not new_domains: + log("Trying alternate method with automatic redirect following") + + try: + with httpx.Client(verify=False, follow_redirects=True, timeout=5) as client: + response_auto = client.get(url_for_auto_redirect, headers=headers) + + log(f"Connected with auto-redirects: Status {response_auto.status_code}") + + if response_auto.history: + log(f"Found {len(response_auto.history)} redirects with auto-following", "SUCCESS") + + for r_hist in response_auto.history: + redirect_info_auto = {'url': str(r_hist.url), 'status_code': r_hist.status_code} + redirects.append(redirect_info_auto) + log(f"Auto-redirect: {r_hist.url} (Status: {r_hist.status_code})") + + final_url = str(response_auto.url) + final_domain_info = parse_url(final_url) + for redirect_hist_item in response_auto.history: + redirect_domain_val = parse_url(str(redirect_hist_item.url)) + if redirect_domain_val and original_info and redirect_domain_val['full_domain'] != original_info['full_domain']: + new_domains.append({'domain': redirect_domain_val['full_domain'], 'url': str(redirect_hist_item.url), 'source': 'auto-redirect'}) + + current_final_url_info = parse_url(str(response_auto.url)) + + if 
current_final_url_info and original_info and current_final_url_info['full_domain'] != original_info['full_domain']: + is_already_added = any(d['domain'] == current_final_url_info['full_domain'] and d['source'] == 'auto-redirect' for d in new_domains) + if not is_already_added: + new_domains.append({'domain': current_final_url_info['full_domain'], 'url': str(response_auto.url), 'source': 'final_url_auto'}) + final_url = str(response_auto.url) + final_domain_info = current_final_url_info + log(f"Final URL from auto-redirect: {final_url}", "SUCCESS") + + except httpx.RequestError as e: + log(f"Error with auto-redirect attempt: {str(e)}", "ERROR") + except NameError: + log(f"Error: URL for auto-redirect attempt was not defined.", "ERROR") + + unique_domains = [] + seen_domains = set() + for domain_info_item in new_domains: + if domain_info_item['domain'] not in seen_domains: + seen_domains.add(domain_info_item['domain']) + unique_domains.append(domain_info_item) + + if not final_url: + final_url = input_url + if not final_domain_info: + final_domain_info = original_info + + if final_domain_info: + parsed_final_url_info = parse_url(final_url) + if parsed_final_url_info: + final_url = parsed_final_url_info['url'] + final_domain_info = parsed_final_url_info + else: + final_domain_info = original_info + final_url = original_info['url'] if original_info else input_url + + results_original_domain = original_info['full_domain'] if original_info else None + results_final_domain_tld = final_domain_info['domain'] if final_domain_info and 'domain' in final_domain_info else None + + results = { + 'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + 'original_url': input_url, + 'original_domain': results_original_domain, + 'original_ip': orig_ip, + 'new_domains': unique_domains, + 'redirects': redirects, + 'log': log_buffer } + simplified_json_output = {'full_url': final_url, 'domain': results_final_domain_tld} - headers.update(additional_headers) - return headers + if verbose: + log(f"DEBUG - Simplified output: {simplified_json_output}", "INFO") + + if output_file: + try: + with open(output_file, 'w', encoding='utf-8') as f: + json.dump(results, f, indent=2, ensure_ascii=False) + log(f"Results saved to {output_file}", "SUCCESS") + except Exception as e: + log(f"Error writing to output file: {str(e)}", "ERROR") + + if json_output: + return simplified_json_output + else: + return results -def extract_redirect_from_403(response, original_url): - redirect_headers = ['location', 'refresh', 'x-redirect-to', 'x-location', 'redirect'] - for header in redirect_headers: - if header in response.headers: - return response.headers[header] - - try: - content = response.text - - js_patterns = [ - r'window\.location\.href\s*=\s*["\']([^"\']+)["\']', - r'window\.location\s*=\s*["\']([^"\']+)["\']', - r'location\.href\s*=\s*["\']([^"\']+)["\']', - r'document\.location\s*=\s*["\']([^"\']+)["\']', - r'top\.location\.href\s*=\s*["\']([^"\']+)["\']', - r'parent\.location\s*=\s*["\']([^"\']+)["\']' - ] - - for pattern in js_patterns: - match = re.search(pattern, content, re.IGNORECASE) - if match: - return match.group(1) - - meta_patterns = [ - r']*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)', - r']*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?' 
- ] - - for pattern in meta_patterns: - match = re.search(pattern, content, re.IGNORECASE) - if match: - return match.group(1) - - text_patterns = [ - r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)', - r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)', - r'[Mm]oved\s+to:?\s*([^\s<>"\']+)', - r'[Ff]ound\s+at:?\s*([^\s<>"\']+)', - r'[Gg]o\s+to:?\s*([^\s<>"\']+)', - r'[Vv]isit:?\s*([^\s<>"\']+)', - r'https?://[^\s<>"\']+\.[a-z]{2,}[^\s<>"\']*' - ] - - for pattern in text_patterns: - match = re.search(pattern, content) - if match: - potential_url = match.group(1) if '(' in pattern else match.group(0) - if potential_url.startswith(('http://', 'https://', '//')): - return potential_url - - link_patterns = [ - r']*href=["\']([^"\']+)["\'][^>]*>(?:click here|continue|proceed|go here)', - r']*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', - r']*href=["\']([^"\']+)["\']' - ] - - for pattern in link_patterns: - match = re.search(pattern, content, re.IGNORECASE) - if match: - return match.group(1) - - except Exception: - pass - - return None - -def extract_domain_from_response(response, original_url): - if 'location' in response.headers: - return response.headers['location'] - - if str(response.url) != original_url: - return str(response.url) - - try: - content_type = response.headers.get('content-type', '').lower() - if 'text/html' in content_type or 'text/plain' in content_type: - response_text = response.text - - js_redirect_patterns = [ - r'window\.location\.href\s*=\s*["\']([^"\']+)["\']', - r'window\.location\s*=\s*["\']([^"\']+)["\']', - r'location\.href\s*=\s*["\']([^"\']+)["\']', - r'document\.location\s*=\s*["\']([^"\']+)["\']' - ] - - for pattern in js_redirect_patterns: - js_match = re.search(pattern, response_text, re.IGNORECASE) - if js_match: - return js_match.group(1) - - meta_patterns = [ - r']*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)', - r']*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?' - ] - - for pattern in meta_patterns: - meta_match = re.search(pattern, response_text, re.IGNORECASE) - if meta_match: - return meta_match.group(1) - - canonical_match = re.search(r']*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE) - if canonical_match: - return canonical_match.group(1) - - base_match = re.search(r']*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE) - if base_match: - return base_match.group(1) - - error_redirect_patterns = [ - r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)', - r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)', - r'[Mm]oved\s+to:?\s*([^\s<>"\']+)', - r'[Ff]ound\s+at:?\s*([^\s<>"\']+)' - ] - - for pattern in error_redirect_patterns: - error_match = re.search(pattern, response_text) - if error_match: - potential_url = error_match.group(1) - if potential_url.startswith(('http://', 'https://', '//')): - return potential_url - - except Exception as e: - print(f" [!] 
Error extracting from response content: {e}") - - return None - -def try_url(url_to_try, headers=None, timeout=15): - if headers is None: - headers = get_enhanced_headers() - - try: - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as client: - response = client.get(url_to_try) - - if response.status_code in [301, 302, 303, 307, 308]: - location = response.headers.get('location') - if location: - print(f" [+] Found redirect ({response.status_code}) to: {location}") - try: - final_response = client.get(location) - if 200 <= final_response.status_code < 400: - return final_response - else: - return httpx.Response( - status_code=200, - headers={"location": location}, - content=b"", - request=response.request - ) - except Exception: - return httpx.Response( - status_code=200, - headers={"location": location}, - content=b"", - request=response.request - ) - - elif response.status_code == 403: - print(f" [!] HTTP 403 - attempting enhanced extraction") - - redirect_url = extract_redirect_from_403(response, url_to_try) - if redirect_url: - print(f" [+] Found redirect URL in 403 response: {redirect_url}") - return httpx.Response( - status_code=200, - headers={"location": redirect_url}, - content=b"", - request=response.request - ) - - elif response.status_code in [409, 429, 503]: - print(f" [!] HTTP {response.status_code} - attempting to extract redirect info") - - location = response.headers.get('location') - if location: - print(f" [+] Found location header in error response: {location}") - return httpx.Response( - status_code=200, - headers={"location": location}, - content=b"", - request=response.request - ) - - new_url = extract_domain_from_response(response, url_to_try) - if new_url and new_url != url_to_try: - print(f" [+] Found redirect URL in error response content: {new_url}") - return httpx.Response( - status_code=200, - headers={"location": new_url}, - content=b"", - request=response.request - ) - - if 200 <= response.status_code < 400: - return response - - print(f" [!] HTTP {response.status_code} for {url_to_try}") - - except httpx.HTTPStatusError as http_err: - new_url = extract_domain_from_response(http_err.response, url_to_try) - if new_url: - print(f" [+] Found new URL from HTTPStatusError response: {new_url}") - return httpx.Response( - status_code=200, - headers={"location": new_url}, - content=b"", - request=http_err.request - ) - except Exception as e: - print(f" [!] Error for {url_to_try}: {type(e).__name__}") - - return None - -def update_domain_entries(data): - if not data: +def update_site_entry(site_name: str, all_domains_data: dict): + site_config = all_domains_data.get(site_name, {}) + log(f"Processing site: {site_name}", "INFO") + if not site_config.get('full_url'): + log(f"Site {site_name} has no full_url in config. Skipping.", "WARNING") return False - updated_count = 0 + current_full_url = site_config.get('full_url') + current_domain_tld = site_config.get('domain') + found_domain_info = find_new_domain(current_full_url, verbose=False, json_output=True) - for key, entry in data.items(): - print(f"\n--- [DOMAIN] {key} ---") - original_full_url = entry.get("full_url") - original_domain_in_entry = entry.get("domain") + if found_domain_info and found_domain_info.get('full_url') and found_domain_info.get('domain'): + new_full_url = found_domain_info['full_url'] + new_domain_tld = found_domain_info['domain'] - if not original_full_url: - print(f" [!] 'full_url' missing. 
Skipped.") - continue + if new_full_url != current_full_url or new_domain_tld != current_domain_tld: + log(f"Update found for {site_name}: URL '{current_full_url}' -> '{new_full_url}', TLD '{current_domain_tld}' -> '{new_domain_tld}'", "SUCCESS") + updated_entry = site_config.copy() + updated_entry['full_url'] = new_full_url + updated_entry['domain'] = new_domain_tld + if new_domain_tld != current_domain_tld : + updated_entry['old_domain'] = current_domain_tld if current_domain_tld else "" - print(f" [] Stored URL: {original_full_url}") - if original_domain_in_entry: - print(f" [] Stored Domain (TLD): {original_domain_in_entry}") + updated_entry['time_change'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + all_domains_data[site_name] = updated_entry + return True - print(f" [] Testing URL: {original_full_url}") - response = try_url(original_full_url) - - if response: - final_url_from_request = str(response.url) - print(f" [+] Redirect/Response to: {final_url_from_request}") - - parsed_final_url = urlparse(final_url_from_request) - normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) - if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): - normalized_full_url += '/' - - if normalized_full_url != final_url_from_request: - print(f" [+] Normalized URL: {normalized_full_url}") - - if normalized_full_url != original_full_url: - new_tld_val = get_new_tld(final_url_from_request) - - if new_tld_val: - entry["full_url"] = normalized_full_url - - if new_tld_val != original_domain_in_entry: - print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") - entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") - entry["domain"] = new_tld_val - entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") - else: - entry["domain"] = new_tld_val - print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'") - - updated_count += 1 - - else: - print(f" [!] Could not extract TLD from {final_url_from_request}. URL not updated despite potential change.") - else: - print(f" [] Same Domain: {final_url_from_request}") - else: - print(f" [-] No response for {key}") - - return updated_count > 0 + log(f"No changes detected for {site_name}.", "INFO") + return False + else: + log(f"Could not reliably find new domain info for {site_name} from URL: {current_full_url}. No search fallback.", "WARNING") + return False def main(): - print("Starting domain update script...") - domain_data = load_domains(JSON_FILE_PATH) + log("Starting domain update script...") + all_domains_data = load_json_data(JSON_FILE_PATH) + if not all_domains_data: + log("Cannot proceed: Domain data is missing or could not be loaded.", "ERROR") + log("Script finished.") + return - if domain_data: - if update_domain_entries(domain_data): - save_domains(JSON_FILE_PATH, domain_data) - print("\nUpdate complete. Some entries were modified.") - else: - print("\nUpdate complete. No domains were modified.") - else: - print("\nCannot proceed without domain data.") + any_updates_made = False + for site_name_key in list(all_domains_data.keys()): + if update_site_entry(site_name_key, all_domains_data): + any_updates_made = True + print("\n") - print("Script finished.") + if any_updates_made: + save_json_data(JSON_FILE_PATH, all_domains_data) + log("Update complete. 
Some entries were modified.", "SUCCESS") + else: + log("Update complete. No domains were modified.", "INFO") + log("Script finished.") if __name__ == "__main__": main() \ No newline at end of file diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index 7cd57d0..e01483c 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -54,9 +54,9 @@ "time_change": "2025-05-31 12:17:33" }, "altadefinizionegratis": { - "domain": "icu", - "full_url": "https://altadefinizionegratis.icu/", - "old_domain": "taipei", - "time_change": "2025-05-18 11:21:05" + "domain": "cc", + "full_url": "https://altadefinizionegratis.cc/", + "old_domain": "icu", + "time_change": "2025-06-02 10:35:25" } } \ No newline at end of file diff --git a/StreamingCommunity/Upload/update.py b/StreamingCommunity/Upload/update.py index ee9fbb0..be79847 100644 --- a/StreamingCommunity/Upload/update.py +++ b/StreamingCommunity/Upload/update.py @@ -4,6 +4,7 @@ import os import sys import time import asyncio +import importlib.metadata # External library import httpx @@ -11,7 +12,7 @@ from rich.console import Console # Internal utilities -from .version import __version__, __author__, __title__ +from .version import __version__ as source_code_version, __author__, __title__ from StreamingCommunity.Util.config_json import config_manager from StreamingCommunity.Util.headers import get_userAgent @@ -75,7 +76,11 @@ def update(): percentual_stars = 0 # Get the current version (installed version) - current_version = __version__ + try: + current_version = importlib.metadata.version(__title__) + except importlib.metadata.PackageNotFoundError: + console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]") + current_version = source_code_version # Get commit details latest_commit = response_commits[0] if response_commits else None diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py index da2e3af..6ffcf08 100644 --- a/StreamingCommunity/Upload/version.py +++ b/StreamingCommunity/Upload/version.py @@ -2,4 +2,4 @@ __title__ = 'StreamingCommunity' __version__ = '3.0.9' __author__ = 'Arrowar' __description__ = 'A command-line program to download film' -__copyright__ = 'Copyright 2024' +__copyright__ = 'Copyright 2025' \ No newline at end of file diff --git a/setup.py b/setup.py index 760cae5..309554f 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,5 @@ import os +import re from setuptools import setup, find_packages def read_readme(): @@ -8,9 +9,21 @@ def read_readme(): with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f: required_packages = f.read().splitlines() +def get_version(): + try: + import pkg_resources + return pkg_resources.get_distribution('StreamingCommunity').version + except: + version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py") + with open(version_file_path, "r", encoding="utf-8") as f: + version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string in StreamingCommunity/Upload/version.py.") + setup( name="StreamingCommunity", - version="3.0.9", + version=get_version(), long_description=read_readme(), long_description_content_type="text/markdown", author="Lovi-0", @@ -29,4 +42,4 @@ setup( "Bug Reports": 
"https://github.com/Lovi-0/StreamingCommunity/issues", "Source": "https://github.com/Lovi-0/StreamingCommunity", } -) \ No newline at end of file +) \ No newline at end of file From 6efeb96201e68cb586b67a538b779a3038e719d8 Mon Sep 17 00:00:00 2001 From: None <62809003+Arrowar@users.noreply.github.com> Date: Mon, 2 Jun 2025 12:58:38 +0200 Subject: [PATCH 11/20] Update update_domain.yml --- .github/workflows/update_domain.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml index 205596d..eafae46 100644 --- a/.github/workflows/update_domain.yml +++ b/.github/workflows/update_domain.yml @@ -22,7 +22,8 @@ jobs: - name: Install dependencies run: | - pip install httpx ua-generator requests + pip install httpx tldextract ua-generator dnspython + pip install --upgrade pip setuptools wheel - name: Configure DNS From 3cbabfb98b95a7f10a8decc31edcb06c2088eb68 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Mon, 2 Jun 2025 18:14:36 +0200 Subject: [PATCH 12/20] core: Fix requirements --- StreamingCommunity/Api/Player/supervideo.py | 9 +-- StreamingCommunity/Api/Player/vixcloud.py | 23 +++++--- .../Api/Site/altadefinizione/film.py | 10 +++- .../Site/altadefinizione/util/ScrapeSerie.py | 56 ++++++++++++------- .../Api/Site/streamingcommunity/film.py | 4 ++ StreamingCommunity/Upload/update.py | 2 +- requirements.txt | 2 + 7 files changed, 70 insertions(+), 36 deletions(-) diff --git a/StreamingCommunity/Api/Player/supervideo.py b/StreamingCommunity/Api/Player/supervideo.py index 93062eb..32aba79 100644 --- a/StreamingCommunity/Api/Player/supervideo.py +++ b/StreamingCommunity/Api/Player/supervideo.py @@ -5,9 +5,9 @@ import logging # External libraries -import httpx import jsbeautifier from bs4 import BeautifulSoup +from curl_cffi import requests # Internal utilities @@ -28,7 +28,6 @@ class VideoSource: - url (str): The URL of the video source. """ self.headers = get_headers() - self.client = httpx.Client() self.url = url def make_request(self, url: str) -> str: @@ -42,8 +41,10 @@ class VideoSource: - str: The response content if successful, None otherwise. """ try: - response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True) - response.raise_for_status() + response = requests.get(url, headers=self.headers, timeout=MAX_TIMEOUT, impersonate="chrome110") + if response.status_code >= 400: + logging.error(f"Request failed with status code: {response.status_code}") + return None return response.text except Exception as e: diff --git a/StreamingCommunity/Api/Player/vixcloud.py b/StreamingCommunity/Api/Player/vixcloud.py index ed3c871..728d95f 100644 --- a/StreamingCommunity/Api/Player/vixcloud.py +++ b/StreamingCommunity/Api/Player/vixcloud.py @@ -39,6 +39,7 @@ class VideoSource: self.is_series = is_series self.media_id = media_id self.iframe_src = None + self.window_parameter = None def get_iframe(self, episode_id: int) -> None: """ @@ -109,41 +110,45 @@ class VideoSource: # Parse script to get video information self.parse_script(script_text=script) + except httpx.HTTPStatusError as e: + if e.response.status_code == 404: + console.print("[yellow]This content will be available soon![/yellow]") + return + + logging.error(f"Error getting content: {e}") + raise + except Exception as e: logging.error(f"Error getting content: {e}") raise - def get_playlist(self) -> str: + def get_playlist(self) -> str | None: """ Generate authenticated playlist URL. 
Returns: - str: Fully constructed playlist URL with authentication parameters + str | None: Fully constructed playlist URL with authentication parameters, or None if content unavailable """ + if not self.window_parameter: + return None + params = {} - # Add 'h' parameter if video quality is 1080p if self.canPlayFHD: params['h'] = 1 - # Parse the original URL parsed_url = urlparse(self.window_parameter.url) query_params = parse_qs(parsed_url.query) - # Check specifically for 'b=1' in the query parameters if 'b' in query_params and query_params['b'] == ['1']: params['b'] = 1 - # Add authentication parameters (token and expiration) params.update({ "token": self.window_parameter.token, "expires": self.window_parameter.expires }) - # Build the updated query string query_string = urlencode(params) - - # Construct the new URL with updated query parameters return urlunparse(parsed_url._replace(query=query_string)) diff --git a/StreamingCommunity/Api/Site/altadefinizione/film.py b/StreamingCommunity/Api/Site/altadefinizione/film.py index c9a300e..ad2f41f 100644 --- a/StreamingCommunity/Api/Site/altadefinizione/film.py +++ b/StreamingCommunity/Api/Site/altadefinizione/film.py @@ -61,16 +61,22 @@ def download_film(select_title: MediaItem) -> str: # Extract mostraguarda URL try: response = httpx.get(select_title.url, headers=get_headers(), timeout=10) + response.raise_for_status() + soup = BeautifulSoup(response.text, 'html.parser') iframes = soup.find_all('iframe') mostraguarda = iframes[0]['src'] except Exception as e: console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda") + return None # Extract supervideo URL + supervideo_url = None try: response = httpx.get(mostraguarda, headers=get_headers(), timeout=10) + response.raise_for_status() + soup = BeautifulSoup(response.text, 'html.parser') pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+' supervideo_match = re.search(pattern, response.text) @@ -78,7 +84,9 @@ def download_film(select_title: MediaItem) -> str: except Exception as e: console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL") - + console.print("[yellow]This content will be available soon![/yellow]") + return None + # Init class video_source = VideoSource(supervideo_url) master_playlist = video_source.get_playlist() diff --git a/StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py index b65f1d6..76851c8 100644 --- a/StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py +++ b/StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py @@ -38,38 +38,52 @@ class GetSerieInfo: soup = BeautifulSoup(response.text, "html.parser") self.series_name = soup.find("title").get_text(strip=True).split(" - ")[0] - # Process all seasons - season_items = soup.find_all('div', class_='accordion-item') - - for season_idx, season_item in enumerate(season_items, 1): - season_header = season_item.find('div', class_='accordion-header') - if not season_header: - continue - - season_name = season_header.get_text(strip=True) + # Find all season dropdowns + seasons_dropdown = soup.find('div', class_='dropdown seasons') + if not seasons_dropdown: + return + + # Get all season items + season_items = seasons_dropdown.find_all('span', {'data-season': True}) + + for season_item in season_items: + season_num = int(season_item['data-season']) + season_name = season_item.get_text(strip=True) - # Create a new season and get a reference to it + # Create a new 
season current_season = self.seasons_manager.add_season({ - 'number': season_idx, + 'number': season_num, 'name': season_name }) - # Find episodes for this season - episode_divs = season_item.find_all('div', class_='down-episode') - for ep_idx, ep_div in enumerate(episode_divs, 1): - episode_name_tag = ep_div.find('b') - if not episode_name_tag: + # Find all episodes for this season + episodes_container = soup.find('div', {'class': 'dropdown mirrors', 'data-season': str(season_num)}) + if not episodes_container: + continue + + # Get all episode mirrors for this season + episode_mirrors = soup.find_all('div', {'class': 'dropdown mirrors', + 'data-season': str(season_num)}) + + for mirror in episode_mirrors: + episode_data = mirror.get('data-episode', '').split('-') + if len(episode_data) != 2: continue - episode_name = episode_name_tag.get_text(strip=True) - link_tag = ep_div.find('a', string=lambda text: text and "Supervideo" in text) - episode_url = link_tag['href'] if link_tag else None + ep_num = int(episode_data[1]) + + # Find supervideo link + supervideo_span = mirror.find('span', {'data-id': 'supervideo'}) + if not supervideo_span: + continue + + episode_url = supervideo_span.get('data-link', '') # Add episode to the season if current_season: current_season.episodes.add({ - 'number': ep_idx, - 'name': episode_name, + 'number': ep_num, + 'name': f"Episodio {ep_num}", 'url': episode_url }) diff --git a/StreamingCommunity/Api/Site/streamingcommunity/film.py b/StreamingCommunity/Api/Site/streamingcommunity/film.py index 6cbe862..a3b98f6 100644 --- a/StreamingCommunity/Api/Site/streamingcommunity/film.py +++ b/StreamingCommunity/Api/Site/streamingcommunity/film.py @@ -62,6 +62,10 @@ def download_film(select_title: MediaItem, proxy: str = None) -> str: video_source.get_content() master_playlist = video_source.get_playlist() + if master_playlist is None: + console.print(f"[red]Site: {site_constant.SITE_NAME}, error: No master playlist found[/red]") + return None + # Define the filename and path for the downloaded film title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4" mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", "")) diff --git a/StreamingCommunity/Upload/update.py b/StreamingCommunity/Upload/update.py index be79847..d90085f 100644 --- a/StreamingCommunity/Upload/update.py +++ b/StreamingCommunity/Upload/update.py @@ -79,7 +79,7 @@ def update(): try: current_version = importlib.metadata.version(__title__) except importlib.metadata.PackageNotFoundError: - console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]") + #console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. 
Falling back to source version.[/yellow]") current_version = source_code_version # Get commit details diff --git a/requirements.txt b/requirements.txt index 3790b61..95706c7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ m3u8 certifi psutil unidecode +curl_cffi dnspython jsbeautifier pathvalidate @@ -13,3 +14,4 @@ pycryptodomex ua-generator qbittorrent-api pyTelegramBotAPI +beautifulsoup4 \ No newline at end of file From dcfd22bc2b7d8132517b111c9854723ca186432c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 3 Jun 2025 15:27:02 +0000 Subject: [PATCH 13/20] Automatic domain update [skip ci] --- .github/.domain/domains.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index e01483c..85745cc 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -48,10 +48,10 @@ "time_change": "2025-05-26 23:22:45" }, "streamingcommunity": { - "domain": "bio", - "full_url": "https://streamingunity.bio/", - "old_domain": "blog", - "time_change": "2025-05-31 12:17:33" + "domain": "bid", + "full_url": "https://streamingunity.bid/", + "old_domain": "bio", + "time_change": "2025-06-03 15:27:02" }, "altadefinizionegratis": { "domain": "cc", From f4529e5f05cae062897b1d8e735568e61d36df08 Mon Sep 17 00:00:00 2001 From: None <62809003+Arrowar@users.noreply.github.com> Date: Tue, 3 Jun 2025 17:30:27 +0200 Subject: [PATCH 14/20] Update schedule --- .github/workflows/update_domain.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml index eafae46..534956b 100644 --- a/.github/workflows/update_domain.yml +++ b/.github/workflows/update_domain.yml @@ -2,7 +2,7 @@ name: Update domains on: schedule: - - cron: "0 */3 * * *" + - cron: "0 7-21 * * *" workflow_dispatch: jobs: @@ -47,4 +47,4 @@ jobs: git push else echo "No changes to .github/.domain/domains.json to commit." - fi \ No newline at end of file + fi From ccc2478067fd6c257b8403429f5c600da69a706e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 5 Jun 2025 11:18:33 +0000 Subject: [PATCH 15/20] Automatic domain update [skip ci] --- .github/.domain/domains.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index 85745cc..e5a8942 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -48,10 +48,10 @@ "time_change": "2025-05-26 23:22:45" }, "streamingcommunity": { - "domain": "bid", - "full_url": "https://streamingunity.bid/", - "old_domain": "bio", - "time_change": "2025-06-03 15:27:02" + "domain": "art", + "full_url": "https://streamingunity.art/", + "old_domain": "bid", + "time_change": "2025-06-05 11:18:33" }, "altadefinizionegratis": { "domain": "cc", From 49e038a2c8ed1f2af0fa143648b3fc55232b2365 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Fri, 6 Jun 2025 12:30:20 +0200 Subject: [PATCH 16/20] Core: Add arm64 version. 
--- .github/.domain/domains.json | 8 ++++---- .github/{media => .domain}/loc-badge.json | 0 .github/workflows/build.yml | 18 ++++++++++++++++++ .github/workflows/update-loc.yml | 4 ++-- README.md | 6 ++++-- .../Lib/Downloader/HLS/downloader.py | 18 +++++++++++++----- StreamingCommunity/global_search.py | 4 ++-- 7 files changed, 43 insertions(+), 15 deletions(-) rename .github/{media => .domain}/loc-badge.json (100%) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index 85745cc..e5a8942 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -48,10 +48,10 @@ "time_change": "2025-05-26 23:22:45" }, "streamingcommunity": { - "domain": "bid", - "full_url": "https://streamingunity.bid/", - "old_domain": "bio", - "time_change": "2025-06-03 15:27:02" + "domain": "art", + "full_url": "https://streamingunity.art/", + "old_domain": "bid", + "time_change": "2025-06-05 11:18:33" }, "altadefinizionegratis": { "domain": "cc", diff --git a/.github/media/loc-badge.json b/.github/.domain/loc-badge.json similarity index 100% rename from .github/media/loc-badge.json rename to .github/.domain/loc-badge.json diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6f58f25..82e529f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -75,9 +75,24 @@ jobs: executable: StreamingCommunity_linux_previous separator: ':' + # ARM64 build + - os: ubuntu-latest + artifact_name: StreamingCommunity_linux_arm64 + executable: StreamingCommunity_linux_arm64 + separator: ':' + architecture: arm64 + runs-on: ${{ matrix.os }} + # For ARM64, set architecture if present + defaults: + run: + shell: bash steps: + - name: Set up QEMU (for ARM64) + if: ${{ matrix.architecture == 'arm64' }} + uses: docker/setup-qemu-action@v3 + - name: Checkout repository uses: actions/checkout@v4 with: @@ -94,6 +109,7 @@ jobs: uses: actions/setup-python@v4 with: python-version: '3.12' + architecture: ${{ matrix.architecture || 'x64' }} - name: Install dependencies run: | @@ -122,6 +138,8 @@ jobs: --hidden-import=Cryptodome.Util --hidden-import=Cryptodome.Util.Padding \ --hidden-import=Cryptodome.Random \ --hidden-import=telebot \ + --hidden-import=curl_cffi --hidden-import=_cffi_backend \ + --collect-all curl_cffi \ --additional-hooks-dir=pyinstaller/hooks \ --add-data "StreamingCommunity${{ matrix.separator }}StreamingCommunity" \ --name=${{ matrix.artifact_name }} test_run.py diff --git a/.github/workflows/update-loc.yml b/.github/workflows/update-loc.yml index ea325fe..a570fed 100644 --- a/.github/workflows/update-loc.yml +++ b/.github/workflows/update-loc.yml @@ -16,12 +16,12 @@ jobs: - name: Count Lines of Code run: | LOC=$(cloc . --json | jq '.SUM.code') - echo "{\"schemaVersion\": 1, \"label\": \"Lines of Code\", \"message\": \"$LOC\", \"color\": \"green\"}" > .github/media/loc-badge.json + echo "{\"schemaVersion\": 1, \"label\": \"Lines of Code\", \"message\": \"$LOC\", \"color\": \"green\"}" > .github/.domain/loc-badge.json - name: Commit and Push LOC Badge run: | git config --local user.name "GitHub Actions" git config --local user.email "actions@github.com" - git add .github/media/loc-badge.json + git add .github/.domain/loc-badge.json git commit -m "Update lines of code badge" || echo "No changes to commit" git push \ No newline at end of file diff --git a/README.md b/README.md index 0780427..1a712a4 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ PyPI Downloads - Lines of Code + Lines of Code
@@ -518,7 +518,7 @@ To enable qBittorrent integration, follow the setup guide [here](https://github. "download_subtitle": true, "merge_subs": true, "specific_list_subtitles": [ - "ita", + "ita", // Specify language codes or use ["*"] to download all available subtitles "eng" ], "cleanup_tmp_folder": true @@ -544,6 +544,8 @@ To enable qBittorrent integration, follow the setup guide [here](https://github. - `download_subtitle`: Whether to download subtitles - `merge_subs`: Whether to merge subtitles with video - `specific_list_subtitles`: List of subtitle languages to download + * Use `["*"]` to download all available subtitles + * Or specify individual languages like `["ita", "eng"]` * Can be changed with `--specific_list_subtitles ita,eng` #### Cleanup diff --git a/StreamingCommunity/Lib/Downloader/HLS/downloader.py b/StreamingCommunity/Lib/Downloader/HLS/downloader.py index 2b56ef2..5d8c5de 100644 --- a/StreamingCommunity/Lib/Downloader/HLS/downloader.py +++ b/StreamingCommunity/Lib/Downloader/HLS/downloader.py @@ -180,10 +180,14 @@ class M3U8Manager: self.sub_streams = [] if ENABLE_SUBTITLE: - self.sub_streams = [ - s for s in (self.parser._subtitle.get_all_uris_and_names() or []) - if s.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE - ] + if "*" in DOWNLOAD_SPECIFIC_SUBTITLE: + self.sub_streams = self.parser._subtitle.get_all_uris_and_names() or [] + + else: + self.sub_streams = [ + s for s in (self.parser._subtitle.get_all_uris_and_names() or []) + if s.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE + ] def log_selection(self): tuple_available_resolution = self.parser._video.get_list_resolution() @@ -209,9 +213,13 @@ class M3U8Manager: f"[red]Set:[/red] {set_codec_info}" ) + # Get available subtitles and their languages available_subtitles = self.parser._subtitle.get_all_uris_and_names() or [] available_sub_languages = [sub.get('language') for sub in available_subtitles] - downloadable_sub_languages = list(set(available_sub_languages) & set(DOWNLOAD_SPECIFIC_SUBTITLE)) + + # If "*" is in DOWNLOAD_SPECIFIC_SUBTITLE, all languages are downloadable + downloadable_sub_languages = available_sub_languages if "*" in DOWNLOAD_SPECIFIC_SUBTITLE else list(set(available_sub_languages) & set(DOWNLOAD_SPECIFIC_SUBTITLE)) + if available_sub_languages: console.print( f"[cyan bold]Subtitle [/cyan bold] [green]Available:[/green] [purple]{', '.join(available_sub_languages)}[/purple] | " diff --git a/StreamingCommunity/global_search.py b/StreamingCommunity/global_search.py index b3c5526..d420de3 100644 --- a/StreamingCommunity/global_search.py +++ b/StreamingCommunity/global_search.py @@ -157,7 +157,7 @@ def global_search(search_terms: str = None, selected_sites: list = None): # Display progress information console.print(f"\n[bold green]Searching for:[/bold green] [yellow]{search_terms}[/yellow]") - console.print(f"[bold green]Searching across:[/bold green] {len(selected_sites)} sites") + console.print(f"[bold green]Searching across:[/bold green] {len(selected_sites)} sites \n") with Progress() as progress: search_task = progress.add_task("[cyan]Searching...", total=len(selected_sites)) @@ -188,7 +188,7 @@ def global_search(search_terms: str = None, selected_sites: list = None): item_dict['source_alias'] = alias all_results[alias].append(item_dict) - console.print(f"[green]Found {len(database.media_list)} results from {site_name}") + console.print(f"\n[green]Found {len(database.media_list)} results from {site_name}") except Exception as e: console.print(f"[bold red]Error searching {site_name}:[/bold red] 
{str(e)}") From 1d38d04906970791d000e4494c915acefc442db3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 7 Jun 2025 07:18:45 +0000 Subject: [PATCH 17/20] Automatic domain update [skip ci] --- .github/.domain/domains.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index e5a8942..afd4e2d 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -6,10 +6,10 @@ "time_change": "2025-03-19 12:20:19" }, "cb01new": { - "domain": "life", - "full_url": "https://cb01net.life/", - "old_domain": "download", - "time_change": "2025-06-01 01:02:16" + "domain": "digital", + "full_url": "https://cb01net.digital/", + "old_domain": "life", + "time_change": "2025-06-07 07:18:34" }, "animeunity": { "domain": "so", From eec0d4239aefbbd74809a44265e2fae885f2c745 Mon Sep 17 00:00:00 2001 From: Alessandro Perazzetta <482310+AlessandroPerazzetta@users.noreply.github.com> Date: Mon, 9 Jun 2025 17:40:50 +0200 Subject: [PATCH 18/20] Check dns resolve domains (#338) * refactor: streamline proxy checking in search function * refactor: update DNS check method, try a real dns resolution instead of checking dns provider * Fix merge conflicts * Automatic domain update [skip ci] * Automatic domain update [skip ci] * Automatic domain update [skip ci] * Enhance DNS resolution check to accept a custom list of domains * Update run.py --------- Co-authored-by: github-actions[bot] Co-authored-by: None <62809003+Arrowar@users.noreply.github.com> --- StreamingCommunity/Util/os.py | 7 +++++-- StreamingCommunity/run.py | 24 +++++++++++------------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py index 2d8f7d1..5ce6bcf 100644 --- a/StreamingCommunity/Util/os.py +++ b/StreamingCommunity/Util/os.py @@ -320,16 +320,19 @@ class InternManager(): # except Exception: # return False - def check_dns_resolve(self): + def check_dns_resolve(self, domains_list: list = None): """ Check if the system's current DNS server can resolve a domain name. Works on both Windows and Unix-like systems. + Args: + domains_list (list, optional): List of domains to test. Defaults to common domains. 
+ Returns: bool: True if the current DNS server can resolve a domain name, False if can't resolve or in case of errors """ - test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"] + test_domains = domains_list or ["github.com", "google.com", "microsoft.com", "amazon.com"] try: for domain in test_domains: diff --git a/StreamingCommunity/run.py b/StreamingCommunity/run.py index 2db8a86..82e4436 100644 --- a/StreamingCommunity/run.py +++ b/StreamingCommunity/run.py @@ -9,6 +9,7 @@ import platform import argparse import importlib import threading, asyncio +from urllib.parse import urlparse from typing import Callable @@ -153,6 +154,7 @@ def initialize(): except: console.log("[red]Error with loading github.") + def restart_script(): """Riavvia lo script con gli stessi argomenti della riga di comando.""" print("\nRiavvio dello script...\n") @@ -191,6 +193,11 @@ def force_exit(): os._exit(0) +def _extract_hostname(url_string: str) -> str: + """Safely extracts the hostname from a URL string.""" + return urlparse(url_string).hostname + + def main(script_id = 0): color_map = { @@ -209,20 +216,11 @@ def main(script_id = 0): # Create logger log_not = Logger() initialize() - - # if not internet_manager.check_dns_provider(): - # print() - # console.print("[red]❌ ERROR: DNS configuration is required!") - # console.print("[red]The program cannot function correctly without proper DNS settings.") - # console.print("[yellow]Please configure one of these DNS servers:") - # console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'") - # console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'") - # console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.") - # time.sleep(2) - # msg.ask("[yellow]Press Enter to continue ...") + # Get all site hostname + hostname_list = [hostname for site_info in config_manager.configSite.values() if (hostname := _extract_hostname(site_info.get('full_url')))] - if not internet_manager.check_dns_resolve(): + if not internet_manager.check_dns_resolve(hostname_list): print() console.print("[red]❌ ERROR: DNS configuration is required!") console.print("[red]The program cannot function correctly without proper DNS settings.") @@ -374,4 +372,4 @@ def main(script_id = 0): # Delete script_id script_id = TelegramSession.get_session() if script_id != "unknown": - TelegramSession.deleteScriptId(script_id) \ No newline at end of file + TelegramSession.deleteScriptId(script_id) From d16b99d1f5cbceb391c6c7d7ad8e9b7586affa84 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 10 Jun 2025 10:23:11 +0000 Subject: [PATCH 19/20] Automatic domain update [skip ci] --- .github/.domain/domains.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index afd4e2d..65ea82b 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -25,9 +25,9 @@ }, "guardaserie": { "domain": "meme", - "full_url": "https://guardaserie.meme/", + "full_url": "http://guardaserie.meme/", "old_domain": "meme", - "time_change": "2025-03-19 12:20:24" + "time_change": "2025-06-10 10:23:05" }, "ddlstreamitaly": { "domain": "co", @@ -48,10 +48,10 @@ "time_change": "2025-05-26 23:22:45" }, "streamingcommunity": { - "domain": "art", - "full_url": "https://streamingunity.art/", - "old_domain": "bid", - "time_change": "2025-06-05 11:18:33" + "domain": "agency", + "full_url": 
"https://streamingunity.agency/", + "old_domain": "art", + "time_change": "2025-06-10 10:23:11" }, "altadefinizionegratis": { "domain": "cc", From b60e1e296c66e13395f3271117711df714cc2513 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Tue, 10 Jun 2025 18:26:47 +0200 Subject: [PATCH 20/20] Workflow: Add amend strategy. --- .github/.domain/domains.json | 20 ++++++++--------- .github/workflows/update_domain.yml | 34 ++++++++++++++++++----------- README.md | 3 --- 3 files changed, 31 insertions(+), 26 deletions(-) diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json index 65ea82b..99a31ca 100644 --- a/.github/.domain/domains.json +++ b/.github/.domain/domains.json @@ -6,10 +6,10 @@ "time_change": "2025-03-19 12:20:19" }, "cb01new": { - "domain": "digital", - "full_url": "https://cb01net.digital/", - "old_domain": "life", - "time_change": "2025-06-07 07:18:34" + "domain": "live", + "full_url": "https://cb01net.live/", + "old_domain": "digital", + "time_change": "2025-06-11 07:20:30" }, "animeunity": { "domain": "so", @@ -25,9 +25,9 @@ }, "guardaserie": { "domain": "meme", - "full_url": "http://guardaserie.meme/", + "full_url": "https://guardaserie.meme/", "old_domain": "meme", - "time_change": "2025-06-10 10:23:05" + "time_change": "2025-06-11 07:20:36" }, "ddlstreamitaly": { "domain": "co", @@ -54,9 +54,9 @@ "time_change": "2025-06-10 10:23:11" }, "altadefinizionegratis": { - "domain": "cc", - "full_url": "https://altadefinizionegratis.cc/", - "old_domain": "icu", - "time_change": "2025-06-02 10:35:25" + "domain": "club", + "full_url": "https://altadefinizionegratis.club/", + "old_domain": "cc", + "time_change": "2025-06-11 07:20:42" } } \ No newline at end of file diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml index 534956b..70d1196 100644 --- a/.github/workflows/update_domain.yml +++ b/.github/workflows/update_domain.yml @@ -1,5 +1,4 @@ -name: Update domains - +name: Update domains (Amend Strategy) on: schedule: - cron: "0 7-21 * * *" @@ -8,22 +7,25 @@ on: jobs: update-domains: runs-on: ubuntu-latest + permissions: contents: write steps: - name: Checkout code uses: actions/checkout@v4 - + with: + fetch-depth: 0 # Serve per l'amend + token: ${{ secrets.GITHUB_TOKEN }} + - name: Setup Python uses: actions/setup-python@v5 with: - python-version: '3.12' - + python-version: '3.12' + - name: Install dependencies run: | pip install httpx tldextract ua-generator dnspython - pip install --upgrade pip setuptools wheel - name: Configure DNS @@ -33,18 +35,24 @@ jobs: - name: Execute domain update script run: python .github/.domain/domain_update.py - - - name: Commit and push changes (if any) + + - name: Always amend last commit run: | git config --global user.name 'github-actions[bot]' git config --global user.email 'github-actions[bot]@users.noreply.github.com' - # Check if domains.json was modified if ! git diff --quiet .github/.domain/domains.json; then + echo "📝 Changes detected - amending last commit" git add .github/.domain/domains.json - git commit -m "Automatic domain update [skip ci]" - echo "Changes committed. Attempting to push..." - git push + git commit --amend --no-edit + git push --force-with-lease origin main else - echo "No changes to .github/.domain/domains.json to commit." + echo "✅ No changes to domains.json" fi + + - name: Verify repository state + if: failure() + run: | + echo "❌ Something went wrong. 
Repository state:" + git log --oneline -5 + git status \ No newline at end of file diff --git a/README.md b/README.md index 1a712a4..78a0022 100644 --- a/README.md +++ b/README.md @@ -814,9 +814,6 @@ Addon per Stremio che consente lo streaming HTTPS di film, serie, anime e TV in ### 🧩 [streamingcommunity-unofficialapi](https://github.com/Blu-Tiger/streamingcommunity-unofficialapi) API non ufficiale per accedere ai contenuti del sito italiano StreamingCommunity. -### 🎥 [stream-buddy](https://github.com/Bbalduzz/stream-buddy) -Tool per guardare o scaricare film dalla piattaforma StreamingCommunity. - # Disclaimer This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.