From d0207b366994349f92378e3ab2ec646c948acc71 Mon Sep 17 00:00:00 2001
From: Lovi <62809003+Arrowar@users.noreply.github.com>
Date: Mon, 2 Jun 2025 11:08:46 +0200
Subject: [PATCH] Fix wrong pip version
---
.github/.domain/domain_update.py | 634 ++++++++++++++-------------
.github/.domain/domains.json | 8 +-
StreamingCommunity/Upload/update.py | 9 +-
StreamingCommunity/Upload/version.py | 2 +-
setup.py | 17 +-
5 files changed, 345 insertions(+), 325 deletions(-)
diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py
index 52c2ded..2c69a48 100644
--- a/.github/.domain/domain_update.py
+++ b/.github/.domain/domain_update.py
@@ -1,358 +1,360 @@
-import re
+# 20.04.2024
+
import os
import json
from datetime import datetime
-from urllib.parse import urlparse, urlunparse
+from urllib.parse import urlparse, unquote
+
+# External libraries
import httpx
+import tldextract
import ua_generator
-
-JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json")
+import dns.resolver
-def load_domains(file_path):
+# Variables
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+JSON_FILE_PATH = os.path.join(SCRIPT_DIR, "domains.json")
+ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge'))
+
+
+def get_headers():
+ return ua.headers.get()
+
+def get_tld(url_str):
+ try:
+ parsed = urlparse(unquote(url_str))
+ domain = parsed.netloc.lower().removeprefix('www.')  # strip the 'www.' prefix (lstrip('www.') would strip a character set)
+ parts = domain.split('.')
+ return parts[-1] if len(parts) >= 2 else None
+
+ except Exception:
+ return None
+
+def get_base_domain(url_str):
+ try:
+ parsed = urlparse(url_str)
+ domain = parsed.netloc.lower().removeprefix('www.')
+ parts = domain.split('.')
+ return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0]
+
+ except Exception:
+ return None
+
+def get_base_url(url_str):
+ try:
+ parsed = urlparse(url_str)
+ return f"{parsed.scheme}://{parsed.netloc}"
+
+ except Exception:
+ return None
+
+def log(msg, level='INFO'):
+ levels = {
+ 'INFO': '[ ]',
+ 'SUCCESS': '[+]',
+ 'WARNING': '[!]',
+ 'ERROR': '[-]'
+ }
+ entry = f"{levels.get(level, '[?]')} {msg}"
+ print(entry)
+
+def load_json_data(file_path):
if not os.path.exists(file_path):
- print(f"Error: The file {file_path} was not found.")
+ log(f"Error: The file {file_path} was not found.", "ERROR")
return None
try:
with open(file_path, 'r', encoding='utf-8') as f:
return json.load(f)
-
+
except Exception as e:
- print(f"Error reading the file {file_path}: {e}")
+ log(f"Error reading the file {file_path}: {e}", "ERROR")
return None
-def save_domains(file_path, data):
+def save_json_data(file_path, data):
try:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
- print(f"Data successfully saved to {file_path}")
+ log(f"Data successfully saved to {file_path}", "SUCCESS")
except Exception as e:
- print(f"Error saving the file {file_path}: {e}")
+ log(f"Error saving the file {file_path}: {e}", "ERROR")
+
+def parse_url(url):
+ if not url.startswith(('http://', 'https://')):
+ url = 'https://' + url
-def get_new_tld(full_url):
try:
- parsed_url = urlparse(full_url)
- hostname = parsed_url.hostname
- if hostname:
- parts = hostname.split('.')
- return parts[-1]
-
- except Exception:
- pass
-
- return None
-
-def get_enhanced_headers():
- ua = ua_generator.generate(device='desktop', browser='chrome')
- headers = ua.headers.get()
+ extracted = tldextract.extract(url)
+ parsed = urlparse(url)
+ clean_url = f"{parsed.scheme}://{parsed.netloc}/"
+ full_domain = f"{extracted.domain}.{extracted.suffix}" if extracted.domain else extracted.suffix
+ domain_tld = extracted.suffix
+ result = {
+ 'url': clean_url,
+ 'full_domain': full_domain,
+ 'domain': domain_tld,
+ 'suffix': extracted.suffix,
+ 'subdomain': extracted.subdomain or None
+ }
+ return result
- additional_headers = {
- 'DNT': '1',
- 'Upgrade-Insecure-Requests': '1',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
- 'Accept-Language': 'en-US,en;q=0.9,it;q=0.8',
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Cache-Control': 'max-age=0',
- 'Connection': 'keep-alive',
- 'Referer': 'https://www.google.com/',
+ except Exception as e:
+ log(f"Error parsing URL: {e}", "ERROR")
+ return None
+
+def check_dns_resolution(domain):
+ try:
+ resolver = dns.resolver.Resolver()
+ resolver.timeout = 2
+ resolver.lifetime = 2
+
+ try:
+ answers = resolver.resolve(domain, 'A')
+ return str(answers[0])
+ except Exception:
+ try:
+ answers = resolver.resolve(domain, 'AAAA')
+ return str(answers[0])
+ except Exception:
+ pass
+ return None
+ except Exception:
+ return None
+
+def find_new_domain(input_url, output_file=None, verbose=True, json_output=False):
+ log_buffer = []
+ original_info = parse_url(input_url)
+
+ if not original_info:
+ log(f"Could not parse original URL: {input_url}", "ERROR")
+ if json_output:
+ return {'full_url': input_url, 'domain': None}
+ return None
+
+ log(f"Starting analysis for: {original_info['full_domain']}")
+ orig_ip = check_dns_resolution(original_info['full_domain'])
+ if orig_ip:
+ log(f"Original domain resolves to: {orig_ip}", "SUCCESS")
+ else:
+ log(f"Original domain does not resolve to an IP address", "WARNING")
+
+ headers = get_headers()
+ new_domains = []
+ redirects = []
+ final_url = None
+ final_domain_info = None
+ url_to_test_in_loop = None
+
+ for protocol in ['https://', 'http://']:
+ try:
+ url_to_test_in_loop = f"{protocol}{original_info['full_domain']}"
+ log(f"Testing connectivity to {url_to_test_in_loop}")
+ redirect_chain = []
+ current_url = url_to_test_in_loop
+ max_redirects = 10
+ redirect_count = 0
+
+ while redirect_count < max_redirects:
+ with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
+ response = client.get(current_url, headers=headers)
+
+ redirect_info = {'url': current_url, 'status_code': response.status_code}
+ redirect_chain.append(redirect_info)
+ log(f"Request to {current_url} - Status: {response.status_code}")
+
+ if response.status_code in (301, 302, 303, 307, 308):
+ if 'location' in response.headers:
+ next_url = response.headers['location']
+ if next_url.startswith('/'):
+ parsed_current = urlparse(current_url)
+ next_url = f"{parsed_current.scheme}://{parsed_current.netloc}{next_url}"
+
+ log(f"Redirect found: {next_url} (Status: {response.status_code})")
+ current_url = next_url
+ redirect_count += 1
+ redirect_domain_info_val = parse_url(next_url)
+ if redirect_domain_info_val and redirect_domain_info_val['full_domain'] != original_info['full_domain']:
+ new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'})
+
+ else:
+ log(f"Redirect status code but no Location header", "WARNING")
+ break
+ else:
+ break
+
+ if redirect_chain:
+ final_url = redirect_chain[-1]['url']
+ final_domain_info = parse_url(final_url)
+ redirects.extend(redirect_chain)
+ log(f"Final URL after redirects: {final_url}", "SUCCESS")
+ if final_domain_info and final_domain_info['full_domain'] != original_info['full_domain']:
+ new_domains.append({'domain': final_domain_info['full_domain'], 'url': final_url, 'source': 'final_url'})
+
+ final_status = redirect_chain[-1]['status_code'] if redirect_chain else None
+
+ if final_status and final_status < 400 and final_status != 403:
+ break
+
+ if final_status == 403 and redirect_chain and len(redirect_chain) > 1:
+ log(f"Got 403 Forbidden, but captured {len(redirect_chain)-1} redirects before that", "SUCCESS")
+ break
+
+ except httpx.RequestError as e:
+ log(f"Error connecting to {protocol}{original_info['full_domain']}: {str(e)}", "ERROR")
+
+ url_for_auto_redirect = input_url
+ if url_to_test_in_loop:
+ url_for_auto_redirect = url_to_test_in_loop
+ elif original_info and original_info.get('url'):
+ url_for_auto_redirect = original_info['url']
+
+ if not redirects or not new_domains:
+ log("Trying alternate method with automatic redirect following")
+
+ try:
+ with httpx.Client(verify=False, follow_redirects=True, timeout=5) as client:
+ response_auto = client.get(url_for_auto_redirect, headers=headers)
+
+ log(f"Connected with auto-redirects: Status {response_auto.status_code}")
+
+ if response_auto.history:
+ log(f"Found {len(response_auto.history)} redirects with auto-following", "SUCCESS")
+
+ for r_hist in response_auto.history:
+ redirect_info_auto = {'url': str(r_hist.url), 'status_code': r_hist.status_code}
+ redirects.append(redirect_info_auto)
+ log(f"Auto-redirect: {r_hist.url} (Status: {r_hist.status_code})")
+
+ final_url = str(response_auto.url)
+ final_domain_info = parse_url(final_url)
+ for redirect_hist_item in response_auto.history:
+ redirect_domain_val = parse_url(str(redirect_hist_item.url))
+ if redirect_domain_val and original_info and redirect_domain_val['full_domain'] != original_info['full_domain']:
+ new_domains.append({'domain': redirect_domain_val['full_domain'], 'url': str(redirect_hist_item.url), 'source': 'auto-redirect'})
+
+ current_final_url_info = parse_url(str(response_auto.url))
+
+ if current_final_url_info and original_info and current_final_url_info['full_domain'] != original_info['full_domain']:
+ is_already_added = any(d['domain'] == current_final_url_info['full_domain'] and d['source'] == 'auto-redirect' for d in new_domains)
+ if not is_already_added:
+ new_domains.append({'domain': current_final_url_info['full_domain'], 'url': str(response_auto.url), 'source': 'final_url_auto'})
+ final_url = str(response_auto.url)
+ final_domain_info = current_final_url_info
+ log(f"Final URL from auto-redirect: {final_url}", "SUCCESS")
+
+ except httpx.RequestError as e:
+ log(f"Error with auto-redirect attempt: {str(e)}", "ERROR")
+ except NameError:
+ log(f"Error: URL for auto-redirect attempt was not defined.", "ERROR")
+
+ unique_domains = []
+ seen_domains = set()
+ for domain_info_item in new_domains:
+ if domain_info_item['domain'] not in seen_domains:
+ seen_domains.add(domain_info_item['domain'])
+ unique_domains.append(domain_info_item)
+
+ if not final_url:
+ final_url = input_url
+ if not final_domain_info:
+ final_domain_info = original_info
+
+ if final_domain_info:
+ parsed_final_url_info = parse_url(final_url)
+ if parsed_final_url_info:
+ final_url = parsed_final_url_info['url']
+ final_domain_info = parsed_final_url_info
+ else:
+ final_domain_info = original_info
+ final_url = original_info['url'] if original_info else input_url
+
+ results_original_domain = original_info['full_domain'] if original_info else None
+ results_final_domain_tld = final_domain_info['domain'] if final_domain_info and 'domain' in final_domain_info else None
+
+ results = {
+ 'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+ 'original_url': input_url,
+ 'original_domain': results_original_domain,
+ 'original_ip': orig_ip,
+ 'new_domains': unique_domains,
+ 'redirects': redirects,
+ 'log': log_buffer
}
+ simplified_json_output = {'full_url': final_url, 'domain': results_final_domain_tld}
- headers.update(additional_headers)
- return headers
+ if verbose:
+ log(f"DEBUG - Simplified output: {simplified_json_output}", "INFO")
+
+ if output_file:
+ try:
+ with open(output_file, 'w', encoding='utf-8') as f:
+ json.dump(results, f, indent=2, ensure_ascii=False)
+ log(f"Results saved to {output_file}", "SUCCESS")
+ except Exception as e:
+ log(f"Error writing to output file: {str(e)}", "ERROR")
+
+ if json_output:
+ return simplified_json_output
+ else:
+ return results
-def extract_redirect_from_403(response, original_url):
- redirect_headers = ['location', 'refresh', 'x-redirect-to', 'x-location', 'redirect']
- for header in redirect_headers:
- if header in response.headers:
- return response.headers[header]
-
- try:
- content = response.text
-
- js_patterns = [
- r'window\.location\.href\s*=\s*["\']([^"\']+)["\']',
- r'window\.location\s*=\s*["\']([^"\']+)["\']',
- r'location\.href\s*=\s*["\']([^"\']+)["\']',
- r'document\.location\s*=\s*["\']([^"\']+)["\']',
- r'top\.location\.href\s*=\s*["\']([^"\']+)["\']',
- r'parent\.location\s*=\s*["\']([^"\']+)["\']'
- ]
-
- for pattern in js_patterns:
- match = re.search(pattern, content, re.IGNORECASE)
- if match:
- return match.group(1)
-
- meta_patterns = [
- r'<meta[^>]*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)',
- r'<meta[^>]*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?'
- ]
-
- for pattern in meta_patterns:
- match = re.search(pattern, content, re.IGNORECASE)
- if match:
- return match.group(1)
-
- text_patterns = [
- r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)',
- r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)',
- r'[Mm]oved\s+to:?\s*([^\s<>"\']+)',
- r'[Ff]ound\s+at:?\s*([^\s<>"\']+)',
- r'[Gg]o\s+to:?\s*([^\s<>"\']+)',
- r'[Vv]isit:?\s*([^\s<>"\']+)',
- r'https?://[^\s<>"\']+\.[a-z]{2,}[^\s<>"\']*'
- ]
-
- for pattern in text_patterns:
- match = re.search(pattern, content)
- if match:
- potential_url = match.group(1) if '(' in pattern else match.group(0)
- if potential_url.startswith(('http://', 'https://', '//')):
- return potential_url
-
- link_patterns = [
- r'<a[^>]*href=["\']([^"\']+)["\'][^>]*>(?:click here|continue|proceed|go here)',
- r'<link[^>]*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']',
- r'<a[^>]*href=["\']([^"\']+)["\']'
- ]
-
- for pattern in link_patterns:
- match = re.search(pattern, content, re.IGNORECASE)
- if match:
- return match.group(1)
-
- except Exception:
- pass
-
- return None
-
-def extract_domain_from_response(response, original_url):
- if 'location' in response.headers:
- return response.headers['location']
-
- if str(response.url) != original_url:
- return str(response.url)
-
- try:
- content_type = response.headers.get('content-type', '').lower()
- if 'text/html' in content_type or 'text/plain' in content_type:
- response_text = response.text
-
- js_redirect_patterns = [
- r'window\.location\.href\s*=\s*["\']([^"\']+)["\']',
- r'window\.location\s*=\s*["\']([^"\']+)["\']',
- r'location\.href\s*=\s*["\']([^"\']+)["\']',
- r'document\.location\s*=\s*["\']([^"\']+)["\']'
- ]
-
- for pattern in js_redirect_patterns:
- js_match = re.search(pattern, response_text, re.IGNORECASE)
- if js_match:
- return js_match.group(1)
-
- meta_patterns = [
- r'<meta[^>]*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)',
- r'<meta[^>]*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?'
- ]
-
- for pattern in meta_patterns:
- meta_match = re.search(pattern, response_text, re.IGNORECASE)
- if meta_match:
- return meta_match.group(1)
-
- canonical_match = re.search(r'<link[^>]*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE)
- if canonical_match:
- return canonical_match.group(1)
-
- base_match = re.search(r'<base[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE)
- if base_match:
- return base_match.group(1)
-
- error_redirect_patterns = [
- r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)',
- r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)',
- r'[Mm]oved\s+to:?\s*([^\s<>"\']+)',
- r'[Ff]ound\s+at:?\s*([^\s<>"\']+)'
- ]
-
- for pattern in error_redirect_patterns:
- error_match = re.search(pattern, response_text)
- if error_match:
- potential_url = error_match.group(1)
- if potential_url.startswith(('http://', 'https://', '//')):
- return potential_url
-
- except Exception as e:
- print(f" [!] Error extracting from response content: {e}")
-
- return None
-
-def try_url(url_to_try, headers=None, timeout=15):
- if headers is None:
- headers = get_enhanced_headers()
-
- try:
- with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as client:
- response = client.get(url_to_try)
-
- if response.status_code in [301, 302, 303, 307, 308]:
- location = response.headers.get('location')
- if location:
- print(f" [+] Found redirect ({response.status_code}) to: {location}")
- try:
- final_response = client.get(location)
- if 200 <= final_response.status_code < 400:
- return final_response
- else:
- return httpx.Response(
- status_code=200,
- headers={"location": location},
- content=b"",
- request=response.request
- )
- except Exception:
- return httpx.Response(
- status_code=200,
- headers={"location": location},
- content=b"",
- request=response.request
- )
-
- elif response.status_code == 403:
- print(f" [!] HTTP 403 - attempting enhanced extraction")
-
- redirect_url = extract_redirect_from_403(response, url_to_try)
- if redirect_url:
- print(f" [+] Found redirect URL in 403 response: {redirect_url}")
- return httpx.Response(
- status_code=200,
- headers={"location": redirect_url},
- content=b"",
- request=response.request
- )
-
- elif response.status_code in [409, 429, 503]:
- print(f" [!] HTTP {response.status_code} - attempting to extract redirect info")
-
- location = response.headers.get('location')
- if location:
- print(f" [+] Found location header in error response: {location}")
- return httpx.Response(
- status_code=200,
- headers={"location": location},
- content=b"",
- request=response.request
- )
-
- new_url = extract_domain_from_response(response, url_to_try)
- if new_url and new_url != url_to_try:
- print(f" [+] Found redirect URL in error response content: {new_url}")
- return httpx.Response(
- status_code=200,
- headers={"location": new_url},
- content=b"",
- request=response.request
- )
-
- if 200 <= response.status_code < 400:
- return response
-
- print(f" [!] HTTP {response.status_code} for {url_to_try}")
-
- except httpx.HTTPStatusError as http_err:
- new_url = extract_domain_from_response(http_err.response, url_to_try)
- if new_url:
- print(f" [+] Found new URL from HTTPStatusError response: {new_url}")
- return httpx.Response(
- status_code=200,
- headers={"location": new_url},
- content=b"",
- request=http_err.request
- )
- except Exception as e:
- print(f" [!] Error for {url_to_try}: {type(e).__name__}")
-
- return None
-
-def update_domain_entries(data):
- if not data:
+def update_site_entry(site_name: str, all_domains_data: dict):
+ site_config = all_domains_data.get(site_name, {})
+ log(f"Processing site: {site_name}", "INFO")
+ if not site_config.get('full_url'):
+ log(f"Site {site_name} has no full_url in config. Skipping.", "WARNING")
return False
- updated_count = 0
+ current_full_url = site_config.get('full_url')
+ current_domain_tld = site_config.get('domain')
+ found_domain_info = find_new_domain(current_full_url, verbose=False, json_output=True)
- for key, entry in data.items():
- print(f"\n--- [DOMAIN] {key} ---")
- original_full_url = entry.get("full_url")
- original_domain_in_entry = entry.get("domain")
+ if found_domain_info and found_domain_info.get('full_url') and found_domain_info.get('domain'):
+ new_full_url = found_domain_info['full_url']
+ new_domain_tld = found_domain_info['domain']
- if not original_full_url:
- print(f" [!] 'full_url' missing. Skipped.")
- continue
+ if new_full_url != current_full_url or new_domain_tld != current_domain_tld:
+ log(f"Update found for {site_name}: URL '{current_full_url}' -> '{new_full_url}', TLD '{current_domain_tld}' -> '{new_domain_tld}'", "SUCCESS")
+ updated_entry = site_config.copy()
+ updated_entry['full_url'] = new_full_url
+ updated_entry['domain'] = new_domain_tld
+ if new_domain_tld != current_domain_tld:
+ updated_entry['old_domain'] = current_domain_tld if current_domain_tld else ""
- print(f" [] Stored URL: {original_full_url}")
- if original_domain_in_entry:
- print(f" [] Stored Domain (TLD): {original_domain_in_entry}")
+ updated_entry['time_change'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ all_domains_data[site_name] = updated_entry
+ return True
- print(f" [] Testing URL: {original_full_url}")
- response = try_url(original_full_url)
-
- if response:
- final_url_from_request = str(response.url)
- print(f" [+] Redirect/Response to: {final_url_from_request}")
-
- parsed_final_url = urlparse(final_url_from_request)
- normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment=''))
- if parsed_final_url.path == '' and not normalized_full_url.endswith('/'):
- normalized_full_url += '/'
-
- if normalized_full_url != final_url_from_request:
- print(f" [+] Normalized URL: {normalized_full_url}")
-
- if normalized_full_url != original_full_url:
- new_tld_val = get_new_tld(final_url_from_request)
-
- if new_tld_val:
- entry["full_url"] = normalized_full_url
-
- if new_tld_val != original_domain_in_entry:
- print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'")
- entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "")
- entry["domain"] = new_tld_val
- entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
- print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'")
- else:
- entry["domain"] = new_tld_val
- print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'")
-
- updated_count += 1
-
- else:
- print(f" [!] Could not extract TLD from {final_url_from_request}. URL not updated despite potential change.")
- else:
- print(f" [] Same Domain: {final_url_from_request}")
-
else:
- print(f" [-] No response for {key}")
-
- return updated_count > 0
+ log(f"No changes detected for {site_name}.", "INFO")
+ return False
+ else:
+ log(f"Could not reliably find new domain info for {site_name} from URL: {current_full_url}. No search fallback.", "WARNING")
+ return False
def main():
- print("Starting domain update script...")
- domain_data = load_domains(JSON_FILE_PATH)
+ log("Starting domain update script...")
+ all_domains_data = load_json_data(JSON_FILE_PATH)
+ if not all_domains_data:
+ log("Cannot proceed: Domain data is missing or could not be loaded.", "ERROR")
+ log("Script finished.")
+ return
- if domain_data:
- if update_domain_entries(domain_data):
- save_domains(JSON_FILE_PATH, domain_data)
- print("\nUpdate complete. Some entries were modified.")
- else:
- print("\nUpdate complete. No domains were modified.")
- else:
- print("\nCannot proceed without domain data.")
+ any_updates_made = False
+ for site_name_key in list(all_domains_data.keys()):
+ if update_site_entry(site_name_key, all_domains_data):
+ any_updates_made = True
+ print("\n")
- print("Script finished.")
+ if any_updates_made:
+ save_json_data(JSON_FILE_PATH, all_domains_data)
+ log("Update complete. Some entries were modified.", "SUCCESS")
+ else:
+ log("Update complete. No domains were modified.", "INFO")
+ log("Script finished.")
if __name__ == "__main__":
main()
\ No newline at end of file
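
Note on the rewritten resolver: parse_url stores tldextract's suffix in the entry's "domain" field, so the script tracks TLD rotations ("icu" -> "cc") rather than name changes. A quick illustration of how tldextract splits a hostname (the URL is only an example; tldextract fetches the public-suffix list on first use):

    import tldextract

    ext = tldextract.extract("https://www.example.co.uk/path")
    # ext.subdomain == "www", ext.domain == "example", ext.suffix == "co.uk"
    full_domain = f"{ext.domain}.{ext.suffix}"  # "example.co.uk"

The redirect loop likewise walks Location headers by hand (follow_redirects=False) so every hop is recorded even when the final hop returns 403. A minimal sketch of that loop under the same httpx settings as the patch; walk_redirects is a hypothetical helper, not part of the script:

    import httpx

    def walk_redirects(url: str, max_hops: int = 10) -> list[tuple[str, int]]:
        # Record (url, status) for each hop instead of letting httpx follow silently.
        chain = []
        for _ in range(max_hops):
            with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
                response = client.get(url)
            chain.append((url, response.status_code))
            location = response.headers.get("location")
            if response.status_code not in (301, 302, 303, 307, 308) or not location:
                break
            url = str(httpx.URL(url).join(location))  # resolve relative Location values
        return chain
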
diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json
index 7cd57d0..e01483c 100644
--- a/.github/.domain/domains.json
+++ b/.github/.domain/domains.json
@@ -54,9 +54,9 @@
"time_change": "2025-05-31 12:17:33"
},
"altadefinizionegratis": {
- "domain": "icu",
- "full_url": "https://altadefinizionegratis.icu/",
- "old_domain": "taipei",
- "time_change": "2025-05-18 11:21:05"
+ "domain": "cc",
+ "full_url": "https://altadefinizionegratis.cc/",
+ "old_domain": "icu",
+ "time_change": "2025-06-02 10:35:25"
}
}
\ No newline at end of file
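
Each entry in domains.json records the current TLD, the canonical URL, the previous TLD, and the timestamp of the last change. A minimal sketch of reading one entry (path as in this repo, values from the hunk above):

    import json

    with open(".github/.domain/domains.json", encoding="utf-8") as f:
        domains = json.load(f)

    site = domains["altadefinizionegratis"]
    print(site["full_url"])                          # https://altadefinizionegratis.cc/
    print(site["domain"], "<-", site["old_domain"])  # cc <- icu
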
diff --git a/StreamingCommunity/Upload/update.py b/StreamingCommunity/Upload/update.py
index ee9fbb0..be79847 100644
--- a/StreamingCommunity/Upload/update.py
+++ b/StreamingCommunity/Upload/update.py
@@ -4,6 +4,7 @@ import os
import sys
import time
import asyncio
+import importlib.metadata
# External library
import httpx
@@ -11,7 +12,7 @@ from rich.console import Console
# Internal utilities
-from .version import __version__, __author__, __title__
+from .version import __version__ as source_code_version, __author__, __title__
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
@@ -75,7 +76,11 @@ def update():
percentual_stars = 0
# Get the current version (installed version)
- current_version = __version__
+ try:
+ current_version = importlib.metadata.version(__title__)
+ except importlib.metadata.PackageNotFoundError:
+ console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]")
+ current_version = source_code_version
# Get commit details
latest_commit = response_commits[0] if response_commits else None
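
This is the core fix: ask the installed distribution's metadata for the version instead of trusting the source constant, which goes stale when the installed wheel and the checked-out code diverge. A minimal sketch of the lookup (importlib.metadata is stdlib since Python 3.8; the distribution name matches setup.py):

    import importlib.metadata

    try:
        installed = importlib.metadata.version("StreamingCommunity")
    except importlib.metadata.PackageNotFoundError:
        installed = None  # running from source, not pip-installed
    print(installed or "fall back to source __version__")
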
diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py
index da2e3af..6ffcf08 100644
--- a/StreamingCommunity/Upload/version.py
+++ b/StreamingCommunity/Upload/version.py
@@ -2,4 +2,4 @@ __title__ = 'StreamingCommunity'
__version__ = '3.0.9'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
-__copyright__ = 'Copyright 2024'
+__copyright__ = 'Copyright 2025'
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 760cae5..309554f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
import os
+import re
from setuptools import setup, find_packages
def read_readme():
@@ -8,9 +9,21 @@ def read_readme():
with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f:
required_packages = f.read().splitlines()
+def get_version():
+ try:
+ import pkg_resources
+ return pkg_resources.get_distribution('StreamingCommunity').version
+ except Exception:
+ version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py")
+ with open(version_file_path, "r", encoding="utf-8") as f:
+ version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
+ if version_match:
+ return version_match.group(1)
+ raise RuntimeError("Unable to find version string in StreamingCommunity/Upload/version.py.")
+
setup(
name="StreamingCommunity",
- version="3.0.9",
+ version=get_version(),
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Lovi-0",
@@ -29,4 +42,4 @@ setup(
"Bug Reports": "https://github.com/Lovi-0/StreamingCommunity/issues",
"Source": "https://github.com/Lovi-0/StreamingCommunity",
}
-)
\ No newline at end of file
+)
\ No newline at end of file
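
With this change, setup.py's get_version() prefers the installed distribution's version and only falls back to regex-parsing version.py, so a clean checkout still builds. A sketch of that fallback regex on a sample string (the sample mirrors version.py above):

    import re

    sample = "__title__ = 'StreamingCommunity'\n__version__ = '3.0.9'\n"
    match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", sample, re.M)
    print(match.group(1))  # 3.0.9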