Compare commits


18 Commits
v3.0.7 ... main

Author                 SHA1        Message                                                         Date
None                   f4529e5f05  Update schedule                                                 2025-06-03 17:30:27 +02:00
github-actions[bot]    dcfd22bc2b  Automatic domain update [skip ci]                               2025-06-03 15:27:02 +00:00
Lovi                   3cbabfb98b  core: Fix requirements                                          2025-06-02 18:14:36 +02:00
None                   6efeb96201  Update update_domain.yml                                        2025-06-02 12:58:38 +02:00
Lovi                   d0207b3669  Fix wrong version pip                                           2025-06-02 11:08:46 +02:00
Lovi                   6713de4ecc  Bump v3.0.9                                                     2025-06-01 16:31:24 +02:00
github-actions[bot]    b8e28a30c0  Automatic domain update [skip ci]                               2025-06-01 01:02:20 +00:00
Alessandro Perazzetta  a45fd0d37e  Dns check (#332)                                                2025-05-31 20:07:30 +02:00
    * refactor: streamline proxy checking in search function
    * refactor: update DNS check method, try a real dns resolution instead of checking dns provider
    * refactor: enhance DNS resolution check to support multiple domains across platforms
    * refactor: replace os.socket with socket for DNS resolution consistency
    Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
github-actions[bot]    4b40b8ce22  Automatic domain update [skip ci]                               2025-05-31 12:17:33 +00:00
Alessandro Perazzetta  73cc2662b8  Dns check refactor (#328)                                       2025-05-31 11:30:59 +02:00
    * refactor: streamline proxy checking in search function
    * refactor: update DNS check method, try a real dns resolution instead of checking dns provider
    * refactor: enhance DNS resolution check to support multiple domains across platforms
    * refactor: replace os.socket with socket for DNS resolution consistency
    Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
Lovi                   1776538c6c  github: Update domains                                          2025-05-31 11:28:38 +02:00
None                   884bcf656c  Create update_domain.yml                                        2025-05-31 10:59:11 +02:00
Lovi                   71e97c2c65  Site: Update endpoint                                           2025-05-31 10:58:12 +02:00
Lovi                   ded66f446e  Remove database of domain                                       2025-05-31 10:52:16 +02:00
Lovi                   86c7293779  Bump v3.0.8                                                     2025-05-25 16:59:29 +02:00
Lovi                   ef6c8c9cb3  api: Fix tipo raiplay                                           2025-05-25 15:37:53 +02:00
Alessandro Perazzetta  c01945fdbc  refactor: streamline proxy checking in search function (#326)   2025-05-22 08:36:44 +02:00
Lovi                   4f0c58f14d  api: fix actual_search_query                                    2025-05-18 16:31:15 +02:00
24 changed files with 1063 additions and 574 deletions

360
.github/.domain/domain_update.py vendored Normal file
View File

@@ -0,0 +1,360 @@
# 20.04.2024
import os
import json
from datetime import datetime
from urllib.parse import urlparse, unquote
# External libraries
import httpx
import tldextract
import ua_generator
import dns.resolver
# Variables
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
JSON_FILE_PATH = os.path.join(SCRIPT_DIR, "domains.json")
ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge'))
def get_headers():
return ua.headers.get()
def get_tld(url_str):
try:
parsed = urlparse(unquote(url_str))
domain = parsed.netloc.lower().removeprefix('www.')  # removeprefix, not lstrip: lstrip('www.') strips characters, not a prefix
parts = domain.split('.')
return parts[-1] if len(parts) >= 2 else None
except Exception:
return None
def get_base_domain(url_str):
try:
parsed = urlparse(url_str)
domain = parsed.netloc.lower().removeprefix('www.')  # removeprefix, not lstrip: lstrip('www.') strips characters, not a prefix
parts = domain.split('.')
return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0]
except Exception:
return None
def get_base_url(url_str):
try:
parsed = urlparse(url_str)
return f"{parsed.scheme}://{parsed.netloc}"
except Exception:
return None
def log(msg, level='INFO'):
levels = {
'INFO': '[ ]',
'SUCCESS': '[+]',
'WARNING': '[!]',
'ERROR': '[-]'
}
entry = f"{levels.get(level, '[?]')} {msg}"
print(entry)
def load_json_data(file_path):
if not os.path.exists(file_path):
log(f"Error: The file {file_path} was not found.", "ERROR")
return None
try:
with open(file_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
log(f"Error reading the file {file_path}: {e}", "ERROR")
return None
def save_json_data(file_path, data):
try:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
log(f"Data successfully saved to {file_path}", "SUCCESS")
except Exception as e:
log(f"Error saving the file {file_path}: {e}", "ERROR")
def parse_url(url):
if not url.startswith(('http://', 'https://')):
url = 'https://' + url
try:
extracted = tldextract.extract(url)
parsed = urlparse(url)
clean_url = f"{parsed.scheme}://{parsed.netloc}/"
full_domain = f"{extracted.domain}.{extracted.suffix}" if extracted.domain else extracted.suffix
domain_tld = extracted.suffix
result = {
'url': clean_url,
'full_domain': full_domain,
'domain': domain_tld,
'suffix': extracted.suffix,
'subdomain': extracted.subdomain or None
}
return result
except Exception as e:
log(f"Error parsing URL: {e}", "ERROR")
return None
def check_dns_resolution(domain):
try:
resolver = dns.resolver.Resolver()
resolver.timeout = 2
resolver.lifetime = 2
try:
answers = resolver.resolve(domain, 'A')
return str(answers[0])
except Exception:
try:
answers = resolver.resolve(domain, 'AAAA')
return str(answers[0])
except Exception:
pass
return None
except Exception:
return None
def find_new_domain(input_url, output_file=None, verbose=True, json_output=False):
log_buffer = []
original_info = parse_url(input_url)
if not original_info:
log(f"Could not parse original URL: {input_url}", "ERROR")
if json_output:
return {'full_url': input_url, 'domain': None}
return None
log(f"Starting analysis for: {original_info['full_domain']}")
orig_ip = check_dns_resolution(original_info['full_domain'])
if orig_ip:
log(f"Original domain resolves to: {orig_ip}", "SUCCESS")
else:
log(f"Original domain does not resolve to an IP address", "WARNING")
headers = get_headers()
new_domains = []
redirects = []
final_url = None
final_domain_info = None
url_to_test_in_loop = None
for protocol in ['https://', 'http://']:
try:
url_to_test_in_loop = f"{protocol}{original_info['full_domain']}"
log(f"Testing connectivity to {url_to_test_in_loop}")
redirect_chain = []
current_url = url_to_test_in_loop
max_redirects = 10
redirect_count = 0
while redirect_count < max_redirects:
with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
response = client.get(current_url, headers=headers)
redirect_info = {'url': current_url, 'status_code': response.status_code}
redirect_chain.append(redirect_info)
log(f"Request to {current_url} - Status: {response.status_code}")
if response.status_code in (301, 302, 303, 307, 308):
if 'location' in response.headers:
next_url = response.headers['location']
if next_url.startswith('/'):
parsed_current = urlparse(current_url)
next_url = f"{parsed_current.scheme}://{parsed_current.netloc}{next_url}"
log(f"Redirect found: {next_url} (Status: {response.status_code})")
current_url = next_url
redirect_count += 1
redirect_domain_info_val = parse_url(next_url)
if redirect_domain_info_val and redirect_domain_info_val['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'})
else:
log(f"Redirect status code but no Location header", "WARNING")
break
else:
break
if redirect_chain:
final_url = redirect_chain[-1]['url']
final_domain_info = parse_url(final_url)
redirects.extend(redirect_chain)
log(f"Final URL after redirects: {final_url}", "SUCCESS")
if final_domain_info and final_domain_info['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': final_domain_info['full_domain'], 'url': final_url, 'source': 'final_url'})
final_status = redirect_chain[-1]['status_code'] if redirect_chain else None
if final_status and final_status < 400 and final_status != 403:
break
if final_status == 403 and redirect_chain and len(redirect_chain) > 1:
log(f"Got 403 Forbidden, but captured {len(redirect_chain)-1} redirects before that", "SUCCESS")
break
except httpx.RequestError as e:
log(f"Error connecting to {protocol}{original_info['full_domain']}: {str(e)}", "ERROR")
url_for_auto_redirect = input_url
if url_to_test_in_loop:
url_for_auto_redirect = url_to_test_in_loop
elif original_info and original_info.get('url'):
url_for_auto_redirect = original_info['url']
if not redirects or not new_domains:
log("Trying alternate method with automatic redirect following")
try:
with httpx.Client(verify=False, follow_redirects=True, timeout=5) as client:
response_auto = client.get(url_for_auto_redirect, headers=headers)
log(f"Connected with auto-redirects: Status {response_auto.status_code}")
if response_auto.history:
log(f"Found {len(response_auto.history)} redirects with auto-following", "SUCCESS")
for r_hist in response_auto.history:
redirect_info_auto = {'url': str(r_hist.url), 'status_code': r_hist.status_code}
redirects.append(redirect_info_auto)
log(f"Auto-redirect: {r_hist.url} (Status: {r_hist.status_code})")
final_url = str(response_auto.url)
final_domain_info = parse_url(final_url)
for redirect_hist_item in response_auto.history:
redirect_domain_val = parse_url(str(redirect_hist_item.url))
if redirect_domain_val and original_info and redirect_domain_val['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': redirect_domain_val['full_domain'], 'url': str(redirect_hist_item.url), 'source': 'auto-redirect'})
current_final_url_info = parse_url(str(response_auto.url))
if current_final_url_info and original_info and current_final_url_info['full_domain'] != original_info['full_domain']:
is_already_added = any(d['domain'] == current_final_url_info['full_domain'] and d['source'] == 'auto-redirect' for d in new_domains)
if not is_already_added:
new_domains.append({'domain': current_final_url_info['full_domain'], 'url': str(response_auto.url), 'source': 'final_url_auto'})
final_url = str(response_auto.url)
final_domain_info = current_final_url_info
log(f"Final URL from auto-redirect: {final_url}", "SUCCESS")
except httpx.RequestError as e:
log(f"Error with auto-redirect attempt: {str(e)}", "ERROR")
except NameError:
log(f"Error: URL for auto-redirect attempt was not defined.", "ERROR")
unique_domains = []
seen_domains = set()
for domain_info_item in new_domains:
if domain_info_item['domain'] not in seen_domains:
seen_domains.add(domain_info_item['domain'])
unique_domains.append(domain_info_item)
if not final_url:
final_url = input_url
if not final_domain_info:
final_domain_info = original_info
if final_domain_info:
parsed_final_url_info = parse_url(final_url)
if parsed_final_url_info:
final_url = parsed_final_url_info['url']
final_domain_info = parsed_final_url_info
else:
final_domain_info = original_info
final_url = original_info['url'] if original_info else input_url
results_original_domain = original_info['full_domain'] if original_info else None
results_final_domain_tld = final_domain_info['domain'] if final_domain_info and 'domain' in final_domain_info else None
results = {
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'original_url': input_url,
'original_domain': results_original_domain,
'original_ip': orig_ip,
'new_domains': unique_domains,
'redirects': redirects,
'log': log_buffer
}
simplified_json_output = {'full_url': final_url, 'domain': results_final_domain_tld}
if verbose:
log(f"DEBUG - Simplified output: {simplified_json_output}", "INFO")
if output_file:
try:
with open(output_file, 'w', encoding='utf-8') as f:
json.dump(results, f, indent=2, ensure_ascii=False)
log(f"Results saved to {output_file}", "SUCCESS")
except Exception as e:
log(f"Error writing to output file: {str(e)}", "ERROR")
if json_output:
return simplified_json_output
else:
return results
def update_site_entry(site_name: str, all_domains_data: dict):
site_config = all_domains_data.get(site_name, {})
log(f"Processing site: {site_name}", "INFO")
if not site_config.get('full_url'):
log(f"Site {site_name} has no full_url in config. Skipping.", "WARNING")
return False
current_full_url = site_config.get('full_url')
current_domain_tld = site_config.get('domain')
found_domain_info = find_new_domain(current_full_url, verbose=False, json_output=True)
if found_domain_info and found_domain_info.get('full_url') and found_domain_info.get('domain'):
new_full_url = found_domain_info['full_url']
new_domain_tld = found_domain_info['domain']
if new_full_url != current_full_url or new_domain_tld != current_domain_tld:
log(f"Update found for {site_name}: URL '{current_full_url}' -> '{new_full_url}', TLD '{current_domain_tld}' -> '{new_domain_tld}'", "SUCCESS")
updated_entry = site_config.copy()
updated_entry['full_url'] = new_full_url
updated_entry['domain'] = new_domain_tld
if new_domain_tld != current_domain_tld :
updated_entry['old_domain'] = current_domain_tld if current_domain_tld else ""
updated_entry['time_change'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
all_domains_data[site_name] = updated_entry
return True
else:
log(f"No changes detected for {site_name}.", "INFO")
return False
else:
log(f"Could not reliably find new domain info for {site_name} from URL: {current_full_url}. No search fallback.", "WARNING")
return False
def main():
log("Starting domain update script...")
all_domains_data = load_json_data(JSON_FILE_PATH)
if not all_domains_data:
log("Cannot proceed: Domain data is missing or could not be loaded.", "ERROR")
log("Script finished.")
return
any_updates_made = False
for site_name_key in list(all_domains_data.keys()):
if update_site_entry(site_name_key, all_domains_data):
any_updates_made = True
print("\n")
if any_updates_made:
save_json_data(JSON_FILE_PATH, all_domains_data)
log("Update complete. Some entries were modified.", "SUCCESS")
else:
log("Update complete. No domains were modified.", "INFO")
log("Script finished.")
if __name__ == "__main__":
main()

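The core of domain_update.py is the manual redirect walk in find_new_domain(): it requests the domain with redirects disabled, follows each Location header by hand, and records every hop whose domain differs from the original. Below is a minimal sketch of that walk using the same httpx API the script depends on; the start URL is a placeholder, not one of the tracked sites.

# Sketch of the manual redirect walk used by find_new_domain().
import httpx

def chase_redirects(start_url: str, max_hops: int = 10) -> list[str]:
    hops = [start_url]
    current = start_url
    with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
        for _ in range(max_hops):
            response = client.get(current)
            # Only a 3xx answer with a Location header continues the chain.
            if response.status_code not in (301, 302, 303, 307, 308):
                break
            location = response.headers.get("location")
            if not location:
                break
            # Resolve relative Location values against the current URL.
            current = str(httpx.URL(current).join(location))
            hops.append(current)
    return hops

if __name__ == "__main__":
    print(chase_redirects("https://example.com"))  # placeholder URL
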
62
.github/.domain/domains.json vendored Normal file
View File

@@ -0,0 +1,62 @@
{
"1337xx": {
"domain": "to",
"full_url": "https://www.1337xx.to/",
"old_domain": "to",
"time_change": "2025-03-19 12:20:19"
},
"cb01new": {
"domain": "life",
"full_url": "https://cb01net.life/",
"old_domain": "download",
"time_change": "2025-06-01 01:02:16"
},
"animeunity": {
"domain": "so",
"full_url": "https://www.animeunity.so/",
"old_domain": "so",
"time_change": "2025-03-19 12:20:23"
},
"animeworld": {
"domain": "ac",
"full_url": "https://www.animeworld.ac/",
"old_domain": "ac",
"time_change": "2025-03-21 12:20:27"
},
"guardaserie": {
"domain": "meme",
"full_url": "https://guardaserie.meme/",
"old_domain": "meme",
"time_change": "2025-03-19 12:20:24"
},
"ddlstreamitaly": {
"domain": "co",
"full_url": "https://ddlstreamitaly.co/",
"old_domain": "co",
"time_change": "2025-03-19 12:20:26"
},
"streamingwatch": {
"domain": "org",
"full_url": "https://www.streamingwatch.org/",
"old_domain": "org",
"time_change": "2025-04-29 12:30:30"
},
"altadefinizione": {
"domain": "spa",
"full_url": "https://altadefinizione.spa/",
"old_domain": "locker",
"time_change": "2025-05-26 23:22:45"
},
"streamingcommunity": {
"domain": "bid",
"full_url": "https://streamingunity.bid/",
"old_domain": "bio",
"time_change": "2025-06-03 15:27:02"
},
"altadefinizionegratis": {
"domain": "cc",
"full_url": "https://altadefinizionegratis.cc/",
"old_domain": "icu",
"time_change": "2025-06-02 10:35:25"
}
}

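Each entry in domains.json carries the current TLD, the full URL, the previous TLD, and the timestamp of the last change. A small sketch that reads the file and prints one line per site; the path assumes the repository layout shown above.

# Sketch: list each tracked site with its current URL and last change time.
import json

with open(".github/.domain/domains.json", encoding="utf-8") as f:
    domains = json.load(f)

for site, entry in domains.items():
    changed = entry.get("time_change", "unknown")
    print(f"{site}: {entry['full_url']} (tld .{entry['domain']}, last change {changed})")
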
View File

@@ -38,14 +38,11 @@ body {
flex-direction: column;
}
header {
background-color: var(--header-bg);
backdrop-filter: blur(10px);
position: fixed;
width: 100%;
padding: 15px 0;
z-index: 1000;
box-shadow: 0 2px 12px var(--shadow-color);
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex: 1;
}
.header-container {
@@ -88,13 +85,6 @@ header {
font-size: 1.1rem;
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex: 1;
}
.site-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
@@ -166,78 +156,6 @@ header {
color: var(--accent-color);
}
.site-content {
text-align: center;
width: 100%;
}
.domain {
color: var(--text-color);
opacity: 0.8;
font-size: 0.9rem;
margin-bottom: 1.5rem;
word-break: break-all;
}
.site-item a {
margin-top: 1rem;
background: linear-gradient(135deg, var(--primary-color), var(--secondary-color));
color: white;
text-decoration: none;
font-weight: 500;
padding: 12px 28px;
border-radius: 8px;
width: fit-content;
margin: 0 auto;
display: flex;
align-items: center;
gap: 8px;
}
.site-item a:hover {
opacity: 0.9;
transform: translateY(-2px);
}
.site-title {
opacity: 0;
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background: rgba(0, 0, 0, 0.8);
padding: 10px 20px;
border-radius: 8px;
transition: opacity 0.3s ease;
color: white;
font-size: 1.2rem;
text-align: center;
width: 80%;
pointer-events: none;
z-index: 2;
}
.site-item:hover .site-title {
opacity: 1;
}
.site-item::after {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(0, 0, 0, 0.5);
opacity: 0;
transition: opacity 0.3s ease;
pointer-events: none;
}
.site-item:hover::after {
opacity: 1;
}
.site-info {
display: flex;
flex-direction: column;
@@ -264,6 +182,211 @@ header {
opacity: 1;
}
.site-status {
position: absolute;
top: 10px;
right: 10px;
width: 12px;
height: 12px;
border-radius: 50%;
background: #4CAF50;
}
.site-status.offline {
background: #f44336;
}
.status-indicator {
position: fixed;
top: 20px;
right: 20px;
background: var(--card-background);
border: 1px solid var(--border-color);
border-radius: 12px;
padding: 15px 20px;
box-shadow: 0 4px 20px var(--shadow-color);
z-index: 1001;
min-width: 280px;
max-width: 400px;
transition: all 0.3s ease;
}
.status-indicator.hidden {
opacity: 0;
transform: translateY(-20px);
pointer-events: none;
}
.status-header {
display: flex;
align-items: center;
gap: 10px;
margin-bottom: 15px;
font-weight: 600;
color: var(--primary-color);
}
.status-icon {
width: 20px;
height: 20px;
border: 2px solid var(--primary-color);
border-radius: 50%;
border-top-color: transparent;
animation: spin 1s linear infinite;
}
.status-icon.ready {
border: none;
background: #4CAF50;
animation: none;
position: relative;
}
.status-icon.ready::after {
content: '✓';
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white;
font-size: 12px;
font-weight: bold;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
.status-text {
color: var(--text-color);
font-size: 0.9rem;
margin-bottom: 10px;
}
.checking-sites {
max-height: 200px;
overflow-y: auto;
background: var(--background-color);
border-radius: 8px;
padding: 10px;
border: 1px solid var(--border-color);
}
.checking-site {
display: flex;
align-items: center;
justify-content: space-between;
gap: 10px;
padding: 6px 8px;
margin-bottom: 4px;
border-radius: 6px;
background: var(--card-background);
font-size: 0.8rem;
color: var(--text-color);
transition: all 0.2s ease;
}
.checking-site.completed {
opacity: 0.6;
background: var(--card-hover);
}
.checking-site.online {
border-left: 3px solid #4CAF50;
}
.checking-site.offline {
border-left: 3px solid #f44336;
}
.checking-site .site-name {
flex: 1;
font-weight: 500;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.checking-site .site-status-icon {
width: 12px;
height: 12px;
border-radius: 50%;
flex-shrink: 0;
}
.checking-site .site-status-icon.checking {
background: var(--primary-color);
animation: pulse 1s infinite;
}
.checking-site .site-status-icon.online {
background: #4CAF50;
}
.checking-site .site-status-icon.offline {
background: #f44336;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.progress-bar {
width: 100%;
height: 6px;
background: var(--background-color);
border-radius: 3px;
overflow: hidden;
margin-top: 10px;
}
.progress-fill {
height: 100%;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
width: 0%;
transition: width 0.3s ease;
border-radius: 3px;
}
.loader {
width: 48px;
height: 48px;
border: 3px solid var(--primary-color);
border-bottom-color: transparent;
border-radius: 50%;
display: inline-block;
position: relative;
box-sizing: border-box;
animation: rotation 1s linear infinite;
}
.loader::after {
content: '';
position: absolute;
box-sizing: border-box;
left: 0;
top: 0;
width: 48px;
height: 48px;
border-radius: 50%;
border: 3px solid transparent;
border-bottom-color: var(--accent-color);
animation: rotationBack 0.5s linear infinite;
transform: rotate(45deg);
}
@keyframes rotation {
0% { transform: rotate(0deg) }
100% { transform: rotate(360deg) }
}
@keyframes rotationBack {
0% { transform: rotate(0deg) }
100% { transform: rotate(-360deg) }
}
footer {
background: var(--card-background);
border-top: 1px solid var(--border-color);
@@ -355,26 +478,6 @@ footer {
transform: scale(1.2);
}
.github-stats {
display: flex;
gap: 10px;
margin-top: 10px;
font-size: 0.8rem;
}
.github-badge {
background-color: var(--background-color);
padding: 4px 8px;
border-radius: 4px;
display: flex;
align-items: center;
gap: 4px;
}
.github-badge i {
color: var(--accent-color);
}
.footer-description {
margin-top: 15px;
font-size: 0.9rem;
@@ -383,103 +486,13 @@ footer {
line-height: 1.5;
}
.update-info {
text-align: center;
margin-top: 30px;
padding-top: 30px;
border-top: 1px solid var(--border-color);
}
.update-note {
color: var(--accent-color);
font-size: 0.9rem;
opacity: 0.9;
}
.theme-toggle {
position: relative;
top: unset;
right: unset;
z-index: 1;
}
.theme-toggle input {
display: none;
}
.theme-toggle label {
cursor: pointer;
padding: 8px;
background: var(--background-color);
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
box-shadow: 0 0 10px var(--shadow-color);
border: 1px solid var(--border-color);
transition: all 0.3s ease;
}
.theme-toggle label:hover {
border-color: var(--primary-color);
transform: translateY(-2px);
}
.theme-toggle .fa-sun {
display: none;
color: #ffd700;
}
.theme-toggle .fa-moon {
color: #8c52ff;
}
.theme-toggle input:checked ~ label .fa-sun {
display: block;
}
.theme-toggle input:checked ~ label .fa-moon {
display: none;
}
.loader {
width: 48px;
height: 48px;
border: 3px solid var(--primary-color);
border-bottom-color: transparent;
border-radius: 50%;
display: inline-block;
position: relative;
box-sizing: border-box;
animation: rotation 1s linear infinite;
}
.loader::after {
content: '';
position: absolute;
box-sizing: border-box;
left: 0;
top: 0;
width: 48px;
height: 48px;
border-radius: 50%;
border: 3px solid transparent;
border-bottom-color: var(--accent-color);
animation: rotationBack 0.5s linear infinite;
transform: rotate(45deg);
}
@keyframes rotation {
0% { transform: rotate(0deg) }
100% { transform: rotate(360deg) }
}
@keyframes rotationBack {
0% { transform: rotate(0deg) }
100% { transform: rotate(-360deg) }
}
/* Improved Responsiveness */
/* Responsiveness */
@media (max-width: 768px) {
.site-grid {
grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
@@ -496,11 +509,7 @@ footer {
grid-template-columns: 1fr;
gap: 20px;
padding: 15px;
}
.theme-toggle {
top: 10px;
right: 10px;
text-align: center;
}
.header-container {
@@ -517,27 +526,6 @@ footer {
width: 100%;
justify-content: center;
}
}
@media (max-width: 480px) {
.site-grid {
grid-template-columns: 1fr;
}
.site-item {
min-height: 220px;
}
.container {
padding: 10px;
}
}
@media (max-width: 768px) {
.footer-content {
grid-template-columns: 1fr;
text-align: center;
}
.footer-title::after {
left: 50%;
@@ -557,83 +545,16 @@
}
}
.time-change {
color: var(--text-color);
opacity: 0.7;
font-size: 0.85rem;
margin-bottom: 0.5rem;
word-break: break-all;
}
@media (max-width: 480px) {
.site-grid {
grid-template-columns: 1fr;
}
.label {
color: var(--accent-color);
font-weight: 500;
}
.controls-container {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 20px;
padding: 15px 20px;
background: var(--card-background);
border-radius: 12px;
border: 1px solid var(--border-color);
}
.grid-controls {
display: flex;
align-items: center;
gap: 10px;
}
.grid-controls label {
color: var(--text-color);
font-weight: 500;
}
.grid-controls select {
padding: 8px 12px;
border-radius: 8px;
border: 1px solid var(--border-color);
background: var(--background-color);
color: var(--text-color);
cursor: pointer;
transition: all 0.3s ease;
}
.grid-controls select:hover {
border-color: var(--primary-color);
}
.sites-stats {
display: flex;
gap: 20px;
align-items: center;
}
.total-sites, .last-update-global {
display: flex;
align-items: center;
gap: 8px;
color: var(--text-color);
font-size: 0.9rem;
}
.total-sites i, .last-update-global i {
color: var(--primary-color);
}
.site-status {
position: absolute;
top: 10px;
right: 10px;
width: 12px;
height: 12px;
border-radius: 50%;
background: #4CAF50;
}
.site-status.offline {
background: #f44336;
.site-item {
min-height: 220px;
}
.container {
padding: 10px;
}
}

View File

@@ -1,32 +1,82 @@
document.documentElement.setAttribute('data-theme', 'dark');
function initGridControls() {
const gridSize = document.getElementById('grid-size');
const siteGrid = document.querySelector('.site-grid');
gridSize.addEventListener('change', function() {
switch(this.value) {
case 'small':
siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(200px, 1fr))';
break;
case 'medium':
siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(300px, 1fr))';
break;
case 'large':
siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(400px, 1fr))';
break;
}
localStorage.setItem('preferredGridSize', this.value);
});
let statusIndicator = null;
let checkingSites = new Map();
let totalSites = 0;
let completedSites = 0;
const savedSize = localStorage.getItem('preferredGridSize');
if (savedSize) {
gridSize.value = savedSize;
gridSize.dispatchEvent(new Event('change'));
function createStatusIndicator() {
statusIndicator = document.createElement('div');
statusIndicator.className = 'status-indicator';
statusIndicator.innerHTML = `
<div class="status-header">
<div class="status-icon"></div>
<span class="status-title">Loading Sites...</span>
</div>
<div class="status-text">Initializing site checks...</div>
<div class="progress-bar">
<div class="progress-fill"></div>
</div>
<div class="checking-sites"></div>
`;
document.body.appendChild(statusIndicator);
return statusIndicator;
}
function updateStatusIndicator(status, text, progress = 0) {
if (!statusIndicator) return;
const statusIcon = statusIndicator.querySelector('.status-icon');
const statusTitle = statusIndicator.querySelector('.status-title');
const statusText = statusIndicator.querySelector('.status-text');
const progressFill = statusIndicator.querySelector('.progress-fill');
statusTitle.textContent = status;
statusText.textContent = text;
progressFill.style.width = `${progress}%`;
if (status === 'Ready') {
statusIcon.classList.add('ready');
setTimeout(() => {
statusIndicator.classList.add('hidden');
setTimeout(() => statusIndicator.remove(), 300);
}, 2000);
}
}
async function checkSiteStatus(url) {
function addSiteToCheck(siteName, siteUrl) {
if (!statusIndicator) return;
const checkingSitesContainer = statusIndicator.querySelector('.checking-sites');
const siteElement = document.createElement('div');
siteElement.className = 'checking-site';
siteElement.innerHTML = `
<span class="site-name">${siteName}</span>
<div class="site-status-icon checking"></div>
`;
checkingSitesContainer.appendChild(siteElement);
checkingSites.set(siteName, siteElement);
}
function updateSiteStatus(siteName, isOnline) {
const siteElement = checkingSites.get(siteName);
if (!siteElement) return;
const statusIcon = siteElement.querySelector('.site-status-icon');
statusIcon.classList.remove('checking');
statusIcon.classList.add(isOnline ? 'online' : 'offline');
siteElement.classList.add('completed', isOnline ? 'online' : 'offline');
completedSites++;
const progress = (completedSites / totalSites) * 100;
updateStatusIndicator(
'Checking Sites...',
`Checked ${completedSites}/${totalSites} sites`,
progress
);
}
async function checkSiteStatus(url, siteName) {
try {
console.log(`Checking status for: ${url}`);
const controller = new AbortController();
@@ -46,66 +96,75 @@ async function checkSiteStatus(url) {
const isOnline = response.type === 'opaque';
console.log(`Site ${url} is ${isOnline ? 'online' : 'offline'} (Type: ${response.type})`);
if (siteName) {
updateSiteStatus(siteName, isOnline);
}
return isOnline;
} catch (error) {
console.log(`Error checking ${url}:`, error.message);
if (siteName) {
updateSiteStatus(siteName, false);
}
return false;
}
}
const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co';
const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE';
const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json';
async function loadSiteData() {
try {
console.log('Starting to load site data...');
console.log('Starting to load site data from GitHub...');
createStatusIndicator();
updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0);
const siteList = document.getElementById('site-list');
siteList.innerHTML = '<div class="loader"></div>';
const headers = {
'accept': '*/*',
'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'apikey': supabaseKey,
'authorization': `Bearer ${supabaseKey}`,
'content-type': 'application/json',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'range': '0-9'
};
console.log('Fetching from Supabase with headers:', headers);
const response = await fetch(`${supabaseUrl}/rest/v1/public?select=*`, {
method: 'GET',
headers: headers
});
console.log(`Fetching from GitHub: ${domainsJsonUrl}`);
const response = await fetch(domainsJsonUrl);
if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
const data = await response.json();
const configSite = await response.json(); // Directly get the site data object
siteList.innerHTML = ''; if (data && data.length > 0) {
console.log('Raw data from Supabase:', data);
const configSite = data[0].data;
console.log('Parsed config site:', configSite);
let totalSites = Object.keys(configSite).length;
siteList.innerHTML = '';
if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object
totalSites = Object.keys(configSite).length;
completedSites = 0;
let latestUpdate = new Date(0);
document.getElementById('sites-count').textContent = totalSites;
updateStatusIndicator('Checking Sites...', `Starting checks for ${totalSites} sites...`, 0);
Object.entries(configSite).forEach(([siteName, site]) => {
addSiteToCheck(siteName, site.full_url);
});
for (const siteName in configSite) {
const site = configSite[siteName];
const statusChecks = Object.entries(configSite).map(async ([siteName, site]) => {
const isOnline = await checkSiteStatus(site.full_url, siteName);
return { siteName, site, isOnline };
});
const results = await Promise.all(statusChecks);
updateStatusIndicator('Ready', 'All sites checked successfully!', 100);
results.forEach(({ siteName, site, isOnline }) => {
const siteItem = document.createElement('div');
siteItem.className = 'site-item';
siteItem.style.cursor = 'pointer';
// Add status indicator
const statusDot = document.createElement('div');
statusDot.className = 'site-status';
const isOnline = await checkSiteStatus(site.full_url);
if (!isOnline) statusDot.classList.add('offline');
siteItem.appendChild(statusDot);
// Update latest update time
const updateTime = new Date(site.time_change);
if (updateTime > latestUpdate) {
latestUpdate = updateTime;
@@ -133,7 +192,9 @@ async function loadSiteData() {
oldDomain.className = 'old-domain';
oldDomain.innerHTML = `<i class="fas fa-history"></i> ${site.old_domain}`;
siteInfo.appendChild(oldDomain);
} siteItem.addEventListener('click', function() {
}
siteItem.addEventListener('click', function() {
window.open(site.full_url, '_blank', 'noopener,noreferrer');
});
@@ -150,7 +211,7 @@ async function loadSiteData() {
siteItem.appendChild(siteTitle);
siteItem.appendChild(siteInfo);
siteList.appendChild(siteItem);
}
});
const formattedDate = latestUpdate.toLocaleDateString('it-IT', {
year: 'numeric',
@@ -162,6 +223,7 @@ async function loadSiteData() {
document.getElementById('last-update-time').textContent = formattedDate;
} else {
siteList.innerHTML = '<div class="no-sites">No sites available</div>';
updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
}
} catch (error) {
console.error('Errore:', error);
@@ -171,6 +233,10 @@ async function loadSiteData() {
<button onclick="loadSiteData()" class="retry-button">Riprova</button>
</div>
`;
if (statusIndicator) {
updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0);
statusIndicator.querySelector('.status-icon').style.background = '#f44336';
}
}
}

50
.github/workflows/update_domain.yml vendored Normal file
View File

@@ -0,0 +1,50 @@
name: Update domains
on:
schedule:
- cron: "0 7-21 * * *"
workflow_dispatch:
jobs:
update-domains:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install dependencies
run: |
pip install httpx tldextract ua-generator dnspython
pip install --upgrade pip setuptools wheel
- name: Configure DNS
run: |
sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
cat /etc/resolv.conf
- name: Execute domain update script
run: python .github/.domain/domain_update.py
- name: Commit and push changes (if any)
run: |
git config --global user.name 'github-actions[bot]'
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
# Check if domains.json was modified
if ! git diff --quiet .github/.domain/domains.json; then
git add .github/.domain/domains.json
git commit -m "Automatic domain update [skip ci]"
echo "Changes committed. Attempting to push..."
git push
else
echo "No changes to .github/.domain/domains.json to commit."
fi

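The final workflow step commits only when git diff --quiet reports that domains.json actually changed. A sketch of the same guard driven from Python instead of shell; it assumes git is on PATH and the working directory is the repository root.

# Sketch of the workflow's "commit only if domains.json changed" guard.
import subprocess

TRACKED_FILE = ".github/.domain/domains.json"

def file_changed(path: str) -> bool:
    # `git diff --quiet <path>` exits with 1 when the file has unstaged changes.
    result = subprocess.run(["git", "diff", "--quiet", path])
    return result.returncode != 0

if file_changed(TRACKED_FILE):
    subprocess.run(["git", "add", TRACKED_FILE], check=True)
    subprocess.run(["git", "commit", "-m", "Automatic domain update [skip ci]"], check=True)
else:
    print(f"No changes to {TRACKED_FILE} to commit.")
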
1
.gitignore vendored
View File

@@ -52,5 +52,4 @@ cmd.txt
bot_config.json
scripts.json
active_requests.json
domains.json
working_proxies.json

View File

@@ -1,5 +1,5 @@
<p align="center">
<img src="https://i.ibb.co/v6RnT0wY/s2.jpg" alt="Project Logo" width="600"/>
<img src="https://i.ibb.co/v6RnT0wY/s2.jpg" alt="Project Logo" width="450"/>
</p>
<p align="center">

View File

@@ -5,9 +5,9 @@ import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
from curl_cffi import requests
# Internal utilities
@@ -28,7 +28,6 @@ class VideoSource:
- url (str): The URL of the video source.
"""
self.headers = get_headers()
self.client = httpx.Client()
self.url = url
def make_request(self, url: str) -> str:
@@ -42,8 +41,10 @@ class VideoSource:
- str: The response content if successful, None otherwise.
"""
try:
response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
response.raise_for_status()
response = requests.get(url, headers=self.headers, timeout=MAX_TIMEOUT, impersonate="chrome110")
if response.status_code >= 400:
logging.error(f"Request failed with status code: {response.status_code}")
return None
return response.text
except Exception as e:

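This hunk replaces httpx with curl_cffi so requests present a Chrome TLS fingerprint, which some hosts require before serving content. A minimal sketch of the new call path; the URL is a placeholder.

# Sketch of the curl_cffi call the diff introduces: impersonate="chrome110"
# makes the TLS/HTTP fingerprint match Chrome 110.
from curl_cffi import requests

response = requests.get(
    "https://example.com",   # placeholder URL
    timeout=10,
    impersonate="chrome110",
)
if response.status_code >= 400:
    print(f"Request failed with status code: {response.status_code}")
else:
    print(response.text[:200])
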
View File

@@ -39,6 +39,7 @@ class VideoSource:
self.is_series = is_series
self.media_id = media_id
self.iframe_src = None
self.window_parameter = None
def get_iframe(self, episode_id: int) -> None:
"""
@@ -109,41 +110,45 @@ class VideoSource:
# Parse script to get video information
self.parse_script(script_text=script)
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
console.print("[yellow]This content will be available soon![/yellow]")
return
logging.error(f"Error getting content: {e}")
raise
except Exception as e:
logging.error(f"Error getting content: {e}")
raise
def get_playlist(self) -> str:
def get_playlist(self) -> str | None:
"""
Generate authenticated playlist URL.
Returns:
str: Fully constructed playlist URL with authentication parameters
str | None: Fully constructed playlist URL with authentication parameters, or None if content unavailable
"""
if not self.window_parameter:
return None
params = {}
# Add 'h' parameter if video quality is 1080p
if self.canPlayFHD:
params['h'] = 1
# Parse the original URL
parsed_url = urlparse(self.window_parameter.url)
query_params = parse_qs(parsed_url.query)
# Check specifically for 'b=1' in the query parameters
if 'b' in query_params and query_params['b'] == ['1']:
params['b'] = 1
# Add authentication parameters (token and expiration)
params.update({
"token": self.window_parameter.token,
"expires": self.window_parameter.expires
})
# Build the updated query string
query_string = urlencode(params)
# Construct the new URL with updated query parameters
return urlunparse(parsed_url._replace(query=query_string))

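get_playlist() rebuilds the playlist URL by merging the optional quality flags with the token and expiry taken from the parsed window parameter. A worked sketch of the same query rebuild; the URL, token, and expiry are made-up values.

# Sketch of the query rebuild in get_playlist(); all values are placeholders.
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

base = "https://vixcloud.example/playlist/123?b=1"   # hypothetical playlist URL
params = {}

parsed = urlparse(base)
query = parse_qs(parsed.query)
if query.get("b") == ["1"]:      # keep the b=1 flag if it was present
    params["b"] = 1
params["h"] = 1                  # request 1080p, mirrors canPlayFHD
params.update({"token": "abc123", "expires": "1750000000"})

print(urlunparse(parsed._replace(query=urlencode(params))))
# -> https://vixcloud.example/playlist/123?b=1&h=1&token=abc123&expires=1750000000
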
View File

@@ -61,16 +61,22 @@ def download_film(select_title: MediaItem) -> str:
# Extract mostraguarda URL
try:
response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
iframes = soup.find_all('iframe')
mostraguarda = iframes[0]['src']
except Exception as e:
console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda")
return None
# Extract supervideo URL
supervideo_url = None
try:
response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+'
supervideo_match = re.search(pattern, response.text)
@@ -78,7 +84,9 @@
except Exception as e:
console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL")
console.print("[yellow]This content will be available soon![/yellow]")
return None
# Init class
video_source = VideoSource(supervideo_url)
master_playlist = video_source.get_playlist()

View File

@@ -38,38 +38,52 @@ class GetSerieInfo:
soup = BeautifulSoup(response.text, "html.parser")
self.series_name = soup.find("title").get_text(strip=True).split(" - ")[0]
# Process all seasons
season_items = soup.find_all('div', class_='accordion-item')
for season_idx, season_item in enumerate(season_items, 1):
season_header = season_item.find('div', class_='accordion-header')
if not season_header:
continue
season_name = season_header.get_text(strip=True)
# Find all season dropdowns
seasons_dropdown = soup.find('div', class_='dropdown seasons')
if not seasons_dropdown:
return
# Get all season items
season_items = seasons_dropdown.find_all('span', {'data-season': True})
for season_item in season_items:
season_num = int(season_item['data-season'])
season_name = season_item.get_text(strip=True)
# Create a new season and get a reference to it
# Create a new season
current_season = self.seasons_manager.add_season({
'number': season_idx,
'number': season_num,
'name': season_name
})
# Find episodes for this season
episode_divs = season_item.find_all('div', class_='down-episode')
for ep_idx, ep_div in enumerate(episode_divs, 1):
episode_name_tag = ep_div.find('b')
if not episode_name_tag:
# Find all episodes for this season
episodes_container = soup.find('div', {'class': 'dropdown mirrors', 'data-season': str(season_num)})
if not episodes_container:
continue
# Get all episode mirrors for this season
episode_mirrors = soup.find_all('div', {'class': 'dropdown mirrors',
'data-season': str(season_num)})
for mirror in episode_mirrors:
episode_data = mirror.get('data-episode', '').split('-')
if len(episode_data) != 2:
continue
episode_name = episode_name_tag.get_text(strip=True)
link_tag = ep_div.find('a', string=lambda text: text and "Supervideo" in text)
episode_url = link_tag['href'] if link_tag else None
ep_num = int(episode_data[1])
# Find supervideo link
supervideo_span = mirror.find('span', {'data-id': 'supervideo'})
if not supervideo_span:
continue
episode_url = supervideo_span.get('data-link', '')
# Add episode to the season
if current_season:
current_season.episodes.add({
'number': ep_idx,
'name': episode_name,
'number': ep_num,
'name': f"Episodio {ep_num}",
'url': episode_url
})

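The rewritten scraper reads seasons from span elements carrying a data-season attribute inside the seasons dropdown, and episodes from div.dropdown.mirrors blocks keyed by data-season and data-episode. A self-contained sketch of those selectors on hypothetical markup that mirrors the structure assumed above.

# Sketch of the new season/episode scraping on made-up markup.
from bs4 import BeautifulSoup

html = """
<div class="dropdown seasons">
  <span data-season="1">Stagione 1</span>
</div>
<div class="dropdown mirrors" data-season="1" data-episode="1-1">
  <span data-id="supervideo" data-link="https://supervideo.example/e/abc"></span>
</div>
"""

soup = BeautifulSoup(html, "html.parser")
seasons_dropdown = soup.find('div', class_='dropdown seasons')
for span in seasons_dropdown.find_all('span', {'data-season': True}):
    season_num = int(span['data-season'])
    for mirror in soup.find_all('div', {'class': 'dropdown mirrors',
                                        'data-season': str(season_num)}):
        ep_num = int(mirror.get('data-episode', '').split('-')[1])
        link = mirror.find('span', {'data-id': 'supervideo'}).get('data-link', '')
        print(f"S{season_num}E{ep_num}: {link}")
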
View File

@@ -31,7 +31,8 @@ class ScrapSerie:
self.client = httpx.Client(
cookies={"sessionId": self.session_id},
headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
base_url=full_url
base_url=full_url,
verify=False
)
try:

View File

@@ -21,7 +21,7 @@ from .film import download_film
# Variable
indice = 5
_useFor = "Film_&_Serie"
_priority = 1 # NOTE: Site search need the use of tmbd obj
_priority = 0
_engineDownload = "hls"
_deprecate = False

View File

@@ -1,9 +1,5 @@
# 21.05.24
import threading
import queue
# External libraries
import httpx
from rich.console import Console
@@ -13,12 +9,9 @@ from rich.console import Console
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
from StreamingCommunity.Lib.TMBD.tmdb import tmdb
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
from .util.ScrapeSerie import GetSerieInfo
# Variable
@@ -26,76 +19,33 @@ console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
MAX_THREADS = 12
def determine_media_type(title):
def determine_media_type(item):
"""
Use TMDB to determine if a title is a movie or TV show.
Determine whether the item is a film or a TV series by checking the actual
season count via GetSerieInfo.
"""
try:
# First search as a movie
movie_results = tmdb._make_request("search/movie", {"query": title})
movie_count = len(movie_results.get("results", []))
# Then search as a TV show
tv_results = tmdb._make_request("search/tv", {"query": title})
tv_count = len(tv_results.get("results", []))
# If results found in only one category, use that
if movie_count > 0 and tv_count == 0:
return "film"
elif tv_count > 0 and movie_count == 0:
return "tv"
# If both have results, compare popularity
if movie_count > 0 and tv_count > 0:
top_movie = movie_results["results"][0]
top_tv = tv_results["results"][0]
return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv"
# Extract program name from path_id
program_name = None
if item.get('path_id'):
parts = item['path_id'].strip('/').split('/')
if len(parts) >= 2:
program_name = parts[-1].split('.')[0]
return "film"
if not program_name:
return "film"
scraper = GetSerieInfo(program_name)
scraper.collect_info_title()
return "tv" if scraper.getNumberSeason() > 0 else "film"
except Exception as e:
console.log(f"Error determining media type with TMDB: {e}")
console.print(f"[red]Error determining media type: {e}[/red]")
return "film"
def worker_determine_type(work_queue, result_dict, worker_id):
"""
Worker function to process items from queue and determine media types.
Parameters:
- work_queue: Queue containing items to process
- result_dict: Dictionary to store results
- worker_id: ID of the worker thread
"""
while not work_queue.empty():
try:
index, item = work_queue.get(block=False)
title = item.get('titolo', '')
media_type = determine_media_type(title)
result_dict[index] = {
'id': item.get('id', ''),
'name': title,
'type': media_type,
'path_id': item.get('path_id', ''),
'url': f"https://www.raiplay.it{item.get('url', '')}",
'image': f"https://www.raiplay.it{item.get('immagine', '')}",
}
work_queue.task_done()
except queue.Empty:
break
except Exception as e:
console.log(f"Worker {worker_id} error: {e}")
work_queue.task_done()
def title_search(query: str) -> int:
"""
Search for titles based on a search query.
@@ -141,33 +91,15 @@ def title_search(query: str) -> int:
data = response.json().get('agg').get('titoli').get('cards')
data = data[:15] if len(data) > 15 else data
# Use multithreading to determine media types in parallel
work_queue = queue.Queue()
result_dict = {}
# Add items to the work queue
for i, item in enumerate(data):
work_queue.put((i, item))
# Create and start worker threads
threads = []
for i in range(min(MAX_THREADS, len(data))):
thread = threading.Thread(
target=worker_determine_type,
args=(work_queue, result_dict, i),
daemon=True
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
# Add all results to media manager in correct order
for i in range(len(data)):
if i in result_dict:
media_search_manager.add_media(result_dict[i])
# Process each item and add to media manager
for item in data:
media_search_manager.add_media({
'id': item.get('id', ''),
'name': item.get('titolo', ''),
'type': determine_media_type(item),
'path_id': item.get('path_id', ''),
'url': f"https://www.raiplay.it{item.get('url', '')}",
'image': f"https://www.raiplay.it{item.get('immagine', '')}",
})
# Return the number of titles found
return media_search_manager.get_length()

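determine_media_type() now derives the RaiPlay program name from path_id, taking the last path segment and dropping its extension, then asks GetSerieInfo whether any seasons exist. A sketch of just the path parsing; the path below is a made-up example of the format.

# Sketch of the path_id parsing in determine_media_type().
def program_name_from_path_id(path_id: str) -> str | None:
    parts = path_id.strip('/').split('/')
    if len(parts) >= 2:
        # Last segment without its extension, e.g. "un-posto-al-sole".
        return parts[-1].split('.')[0]
    return None

print(program_name_from_path_id("/programmi/un-posto-al-sole.json"))
# -> un-posto-al-sole
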
View File

@@ -30,28 +30,48 @@ class GetSerieInfo:
try:
program_url = f"{self.base_url}/programmi/{self.program_name}.json"
response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
# If 404, content is not yet available
if response.status_code == 404:
logging.info(f"Content not yet available: {self.program_name}")
return
response.raise_for_status()
json_data = response.json()
# Look for seasons in the 'blocks' property
for block in json_data.get('blocks'):
if block.get('type') == 'RaiPlay Multimedia Block' and block.get('name', '').lower() == 'episodi':
self.publishing_block_id = block.get('id')
# Extract seasons from sets array
for season_set in block.get('sets', []):
if 'stagione' in season_set.get('name', '').lower():
self.seasons_manager.add_season({
'id': season_set.get('id', ''),
'number': len(self.seasons_manager.seasons) + 1,
'name': season_set.get('name', ''),
'path': season_set.get('path_id', ''),
'episodes_count': season_set.get('episode_size', {}).get('number', 0)
})
for block in json_data.get('blocks', []):
except Exception as e:
# Check if block is a season block or episodi block
if block.get('type') == 'RaiPlay Multimedia Block':
if block.get('name', '').lower() == 'episodi':
self.publishing_block_id = block.get('id')
# Extract seasons from sets array
for season_set in block.get('sets', []):
if 'stagione' in season_set.get('name', '').lower():
self._add_season(season_set, block.get('id'))
elif 'stagione' in block.get('name', '').lower():
self.publishing_block_id = block.get('id')
# Extract season directly from block's sets
for season_set in block.get('sets', []):
self._add_season(season_set, block.get('id'))
except httpx.HTTPError as e:
logging.error(f"Error collecting series info: {e}")
except Exception as e:
logging.error(f"Unexpected error collecting series info: {e}")
def _add_season(self, season_set: dict, block_id: str):
self.seasons_manager.add_season({
'id': season_set.get('id', ''),
'number': len(self.seasons_manager.seasons) + 1,
'name': season_set.get('name', ''),
'path': season_set.get('path_id', ''),
'episodes_count': season_set.get('episode_size', {}).get('number', 0)
})
def collect_info_season(self, number_season: int) -> None:
"""Get episodes for a specific season."""

View File

@@ -121,14 +121,16 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
# Check proxy if not already set
finder = ProxyFinder(site_constant.FULL_URL)
proxy = finder.find_fast_proxy()
if direct_item:
select_title_obj = MediaItem(**direct_item)
process_search_result(select_title_obj, selections, proxy)
return
# Check proxy if not already set
finder = ProxyFinder(site_constant.FULL_URL)
proxy = finder.find_fast_proxy()
actual_search_query = get_user_input(string_to_search)
@@ -142,7 +144,7 @@
# Perform search on the database using the obtained query
finder = ProxyFinder(site_constant.FULL_URL)
proxy = finder.find_fast_proxy()
len_database = title_search(string_to_search, proxy)
len_database = title_search(actual_search_query, proxy)
# If only the database object (media_search_manager populated by title_search) is needed
if get_onlyDatabase:

View File

@@ -62,6 +62,10 @@ def download_film(select_title: MediaItem, proxy: str = None) -> str:
video_source.get_content()
master_playlist = video_source.get_playlist()
if master_playlist is None:
console.print(f"[red]Site: {site_constant.SITE_NAME}, error: No master playlist found[/red]")
return None
# Define the filename and path for the downloaded film
title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))

View File

@@ -4,6 +4,7 @@ import os
import sys
import time
import asyncio
import importlib.metadata
# External library
import httpx
@@ -11,7 +12,7 @@ from rich.console import Console
# Internal utilities
from .version import __version__, __author__, __title__
from .version import __version__ as source_code_version, __author__, __title__
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
@@ -75,7 +76,11 @@ def update():
percentual_stars = 0
# Get the current version (installed version)
current_version = __version__
try:
current_version = importlib.metadata.version(__title__)
except importlib.metadata.PackageNotFoundError:
#console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]")
current_version = source_code_version
# Get commit details
latest_commit = response_commits[0] if response_commits else None

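The updater now prefers the installed package's version over the source tree's version.py, falling back only when the package is not installed (for example, when running from a git checkout). A minimal sketch of that lookup; the fallback constant stands in for version.py's __version__.

# Sketch of the importlib.metadata version lookup above.
import importlib.metadata

SOURCE_CODE_VERSION = "3.0.9"  # stands in for version.py's __version__

try:
    current_version = importlib.metadata.version("StreamingCommunity")
except importlib.metadata.PackageNotFoundError:
    current_version = SOURCE_CODE_VERSION

print(current_version)
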
View File

@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
__version__ = '3.0.7'
__version__ = '3.0.9'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
__copyright__ = 'Copyright 2024'
__copyright__ = 'Copyright 2025'

View File

@@ -39,9 +39,6 @@ class ConfigManager:
# Get the actual path of the module file
current_file_path = os.path.abspath(__file__)
# Navigate upwards to find the project root
# Assuming this file is in a package structure like StreamingCommunity/Util/config_json.py
# We need to go up 2 levels to reach the project root
base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
# Initialize file paths
@@ -271,33 +268,32 @@ class ConfigManager:
self._load_site_data_from_file()
def _load_site_data_from_api(self) -> None:
"""Load site data from API."""
"""Load site data from GitHub."""
domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
headers = {
"apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
"Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
"Content-Type": "application/json",
"User-Agent": get_userAgent()
"User-Agent": get_userAgent()
}
try:
console.print("[bold cyan]Retrieving site data from API...[/bold cyan]")
response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers)
console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
response = requests.get(domains_github_url, timeout=8, headers=headers)
if response.ok:
data = response.json()
if data and len(data) > 0:
self.configSite = data[0]['data']
site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
else:
console.print("[bold yellow]API returned an empty data set[/bold yellow]")
self.configSite = response.json()
site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")
else:
console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
self._handle_site_data_fallback()
except json.JSONDecodeError as e:
console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}")
self._handle_site_data_fallback()
except Exception as e:
console.print(f"[bold red]API connection error:[/bold red] {str(e)}")
console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
self._handle_site_data_fallback()
def _load_site_data_from_file(self) -> None:
@@ -562,7 +558,6 @@ class ConfigManager:
return section in config_source
# Helper function to check the platform
def get_use_large_bar():
"""
Determine if the large bar feature should be enabled.

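Site data now comes straight from the repository's raw domains.json instead of Supabase. A hedged sketch of the fetch-and-fallback shape: only the URL and the JSON decode come from the diff; the empty-dict fallback is a stand-in for _handle_site_data_fallback, and the real code also sends a User-Agent header.

# Sketch of the GitHub fetch that replaces the Supabase call.
import requests

DOMAINS_URL = ("https://raw.githubusercontent.com/Arrowar/StreamingCommunity/"
               "refs/heads/main/.github/.domain/domains.json")

def load_site_data() -> dict:
    try:
        response = requests.get(DOMAINS_URL, timeout=8)
        if response.ok:
            return response.json()
        print(f"GitHub request failed: HTTP {response.status_code}")
    except (requests.RequestException, ValueError) as e:
        print(f"GitHub fetch error: {e}")
    return {}  # the real code falls back to the local file here

sites = load_site_data()
print(f"{len(sites)} streaming services found.")
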
View File

@@ -12,7 +12,7 @@ import inspect
import subprocess
import contextlib
import importlib.metadata
import socket
# External library
from unidecode import unidecode
@@ -283,43 +283,61 @@ class InternManager():
else:
return f"{bytes / (1024 * 1024):.2f} MB/s"
def check_dns_provider(self):
# def check_dns_provider(self):
# """
# Check if the system's current DNS server matches any known DNS providers.
# Returns:
# bool: True if the current DNS server matches a known provider,
# False if no match is found or in case of errors
# """
# dns_providers = {
# "Cloudflare": ["1.1.1.1", "1.0.0.1"],
# "Google": ["8.8.8.8", "8.8.4.4"],
# "OpenDNS": ["208.67.222.222", "208.67.220.220"],
# "Quad9": ["9.9.9.9", "149.112.112.112"],
# "AdGuard": ["94.140.14.14", "94.140.15.15"],
# "Comodo": ["8.26.56.26", "8.20.247.20"],
# "Level3": ["209.244.0.3", "209.244.0.4"],
# "Norton": ["199.85.126.10", "199.85.127.10"],
# "CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
# "Yandex": ["77.88.8.8", "77.88.8.1"]
# }
# try:
# resolver = dns.resolver.Resolver()
# nameservers = resolver.nameservers
# if not nameservers:
# return False
# for server in nameservers:
# for provider, ips in dns_providers.items():
# if server in ips:
# return True
# return False
# except Exception:
# return False
def check_dns_resolve(self):
"""
Check if the system's current DNS server matches any known DNS providers.
Check if the system's current DNS server can resolve a domain name.
Works on both Windows and Unix-like systems.
Returns:
bool: True if the current DNS server matches a known provider,
False if no match is found or in case of errors
bool: True if the current DNS server can resolve a domain name,
False if resolution fails or in case of errors
"""
dns_providers = {
"Cloudflare": ["1.1.1.1", "1.0.0.1"],
"Google": ["8.8.8.8", "8.8.4.4"],
"OpenDNS": ["208.67.222.222", "208.67.220.220"],
"Quad9": ["9.9.9.9", "149.112.112.112"],
"AdGuard": ["94.140.14.14", "94.140.15.15"],
"Comodo": ["8.26.56.26", "8.20.247.20"],
"Level3": ["209.244.0.3", "209.244.0.4"],
"Norton": ["199.85.126.10", "199.85.127.10"],
"CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
"Yandex": ["77.88.8.8", "77.88.8.1"]
}
test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]
try:
resolver = dns.resolver.Resolver()
nameservers = resolver.nameservers
if not nameservers:
return False
for server in nameservers:
for provider, ips in dns_providers.items():
if server in ips:
return True
for domain in test_domains:
# socket.gethostbyname() works consistently across all platforms
socket.gethostbyname(domain)
return True
except (socket.gaierror, socket.error):
return False
except Exception:
return False
class OsSummary:
def __init__(self):

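check_dns_resolve() replaces the provider allow-list with a behavioral test: if the configured resolver can resolve a well-known name, DNS is considered working. A compact sketch of the same socket-based probe; unlike the code above, this variation tries each domain in turn before giving up.

# Sketch of the behavioral DNS check: just try to resolve known-good names.
import socket

def dns_works(domains=("github.com", "google.com")) -> bool:
    for domain in domains:
        try:
            socket.gethostbyname(domain)   # works the same on Windows and Unix
            return True
        except socket.gaierror:
            continue
    return False

print("DNS OK" if dns_works() else "DNS broken")
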
View File

@@ -210,7 +210,19 @@ def main(script_id = 0):
log_not = Logger()
initialize()
if not internet_manager.check_dns_provider():
# if not internet_manager.check_dns_provider():
# print()
# console.print("[red]❌ ERROR: DNS configuration is required!")
# console.print("[red]The program cannot function correctly without proper DNS settings.")
# console.print("[yellow]Please configure one of these DNS servers:")
# console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
# console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
# console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
# time.sleep(2)
# msg.ask("[yellow]Press Enter to continue ...")
if not internet_manager.check_dns_resolve():
print()
console.print("[red]❌ ERROR: DNS configuration is required!")
console.print("[red]The program cannot function correctly without proper DNS settings.")
@@ -219,8 +231,7 @@ def main(script_id = 0):
console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
time.sleep(2)
msg.ask("[yellow]Press Enter to continue ...")
os._exit(0)
# Load search functions
search_functions = load_search_functions()

View File

@@ -6,6 +6,7 @@ m3u8
certifi
psutil
unidecode
curl_cffi
dnspython
jsbeautifier
pathvalidate
@@ -13,3 +14,4 @@ pycryptodomex
ua-generator
qbittorrent-api
pyTelegramBotAPI
beautifulsoup4

View File

@@ -1,4 +1,5 @@
import os
import re
from setuptools import setup, find_packages
def read_readme():
@@ -8,9 +9,21 @@ def read_readme():
with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f:
required_packages = f.read().splitlines()
def get_version():
try:
import pkg_resources
return pkg_resources.get_distribution('StreamingCommunity').version
except Exception:
version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py")
with open(version_file_path, "r", encoding="utf-8") as f:
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string in StreamingCommunity/Upload/version.py.")
setup(
name="StreamingCommunity",
version="3.0.7",
version=get_version(),
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Lovi-0",
@@ -29,4 +42,4 @@ setup(
"Bug Reports": "https://github.com/Lovi-0/StreamingCommunity/issues",
"Source": "https://github.com/Lovi-0/StreamingCommunity",
}
)
)