Compare commits


24 Commits
v3.0.5 ... main

Author  SHA1  Message  Date
None  f4529e5f05  Update schedule  2025-06-03 17:30:27 +02:00
github-actions[bot]  dcfd22bc2b  Automatic domain update [skip ci]  2025-06-03 15:27:02 +00:00
Lovi  3cbabfb98b  core: Fix requirements  2025-06-02 18:14:36 +02:00
None  6efeb96201  Update update_domain.yml  2025-06-02 12:58:38 +02:00
Lovi  d0207b3669  Fix wrong version pip  2025-06-02 11:08:46 +02:00
Lovi  6713de4ecc  Bump v3.0.9  2025-06-01 16:31:24 +02:00
github-actions[bot]  b8e28a30c0  Automatic domain update [skip ci]  2025-06-01 01:02:20 +00:00
Alessandro Perazzetta  a45fd0d37e  Dns check (#332)  2025-05-31 20:07:30 +02:00
    * refactor: streamline proxy checking in search function
    * refactor: update DNS check method, try a real dns resolution instead of checking dns provider
    * refactor: enhance DNS resolution check to support multiple domains across platforms
    * refactor: replace os.socket with socket for DNS resolution consistency
    Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
github-actions[bot]  4b40b8ce22  Automatic domain update [skip ci]  2025-05-31 12:17:33 +00:00
Alessandro Perazzetta  73cc2662b8  Dns check refactor (#328)  2025-05-31 11:30:59 +02:00
    * refactor: streamline proxy checking in search function
    * refactor: update DNS check method, try a real dns resolution instead of checking dns provider
    * refactor: enhance DNS resolution check to support multiple domains across platforms
    * refactor: replace os.socket with socket for DNS resolution consistency
    Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
Lovi  1776538c6c  github: Update domains  2025-05-31 11:28:38 +02:00
None  884bcf656c  Create update_domain.yml  2025-05-31 10:59:11 +02:00
Lovi  71e97c2c65  Site: Update endpoint  2025-05-31 10:58:12 +02:00
Lovi  ded66f446e  Remove database of domain  2025-05-31 10:52:16 +02:00
Lovi  86c7293779  Bump v3.0.8  2025-05-25 16:59:29 +02:00
Lovi  ef6c8c9cb3  api: Fix tipo raiplay  2025-05-25 15:37:53 +02:00
Alessandro Perazzetta  c01945fdbc  refactor: streamline proxy checking in search function (#326)  2025-05-22 08:36:44 +02:00
Lovi  4f0c58f14d  api: fix actual_search_query  2025-05-18 16:31:15 +02:00
Lovi  b3db6aa8c1  Bump v3.0.7  2025-05-18 14:36:55 +02:00
None  1c89398054  Fix telegram and proxy (#322)  2025-05-18 14:16:44 +02:00
    * Add ENABLE_VIDEO
    * Fix proxy
    * Add error proxy
    * Update config.json
    * Fix telegram_bot (#312)
    * Update config.json
    * Fix telegram_bot
    * fix bug
    * Fix StreamingCommunity site
    * Delete console.log
    * fix doppio string_to_search
    * Update __init__.py
    * Update site.py
    * Update config.json
    * Update site.py
    * Update config.json
    * Update __init__.py
    * Update __init__.py
    * Fix proxy (#319)
    * Add ENABLE_VIDEO
    * Fix proxy
    * Add error proxy
    * Update config.json
    * Refactor user input handling and improve messaging in __init__.py
    Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
    Co-authored-by: l1n00 <>
    * Fix proxy __init__
    * Update os.py
    Co-authored-by: l1n00 <delmolinonicola@gmail.com>
None  dfcc29078f  Fix proxy (#319)  2025-05-17 09:54:41 +02:00
    * Add ENABLE_VIDEO
    * Fix proxy
    * Add error proxy
    * Update config.json
None  c0f3d8619b  Bump v3.0.6  2025-05-14 09:36:08 +02:00
None  8e323e83f9  Dev (#318)  2025-05-14 09:34:30 +02:00
    * Fix telegram bot (issues #305 bug) (#316)
    * fix create config.json
    * fix messagge telegram_bot option 0 (Streamingcommunity)
    * Update README.md
    * Update domain
    Co-authored-by: GiuPic <47813665+GiuPic@users.noreply.github.com>
None  e75d8185f9  Site: Fix color map  2025-05-13 12:33:51 +02:00
44 changed files with 1832 additions and 983 deletions

360
.github/.domain/domain_update.py vendored Normal file

@ -0,0 +1,360 @@
# 20.04.2024
import os
import json
from datetime import datetime
from urllib.parse import urlparse, unquote
# External libraries
import httpx
import tldextract
import ua_generator
import dns.resolver
# Variables
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
JSON_FILE_PATH = os.path.join(SCRIPT_DIR, "domains.json")
ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge'))
def get_headers():
return ua.headers.get()
def get_tld(url_str):
try:
parsed = urlparse(unquote(url_str))
        domain = parsed.netloc.lower().removeprefix('www.')  # removeprefix, not lstrip: lstrip('www.') strips characters, not a prefix
parts = domain.split('.')
return parts[-1] if len(parts) >= 2 else None
except Exception:
return None
def get_base_domain(url_str):
try:
parsed = urlparse(url_str)
        domain = parsed.netloc.lower().removeprefix('www.')  # removeprefix, not lstrip: lstrip('www.') strips characters, not a prefix
parts = domain.split('.')
return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0]
except Exception:
return None
def get_base_url(url_str):
try:
parsed = urlparse(url_str)
return f"{parsed.scheme}://{parsed.netloc}"
except Exception:
return None
def log(msg, level='INFO'):
levels = {
'INFO': '[ ]',
'SUCCESS': '[+]',
'WARNING': '[!]',
'ERROR': '[-]'
}
entry = f"{levels.get(level, '[?]')} {msg}"
print(entry)
def load_json_data(file_path):
if not os.path.exists(file_path):
log(f"Error: The file {file_path} was not found.", "ERROR")
return None
try:
with open(file_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
log(f"Error reading the file {file_path}: {e}", "ERROR")
return None
def save_json_data(file_path, data):
try:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
log(f"Data successfully saved to {file_path}", "SUCCESS")
except Exception as e:
log(f"Error saving the file {file_path}: {e}", "ERROR")
def parse_url(url):
if not url.startswith(('http://', 'https://')):
url = 'https://' + url
try:
extracted = tldextract.extract(url)
parsed = urlparse(url)
clean_url = f"{parsed.scheme}://{parsed.netloc}/"
full_domain = f"{extracted.domain}.{extracted.suffix}" if extracted.domain else extracted.suffix
domain_tld = extracted.suffix
result = {
'url': clean_url,
'full_domain': full_domain,
'domain': domain_tld,
'suffix': extracted.suffix,
'subdomain': extracted.subdomain or None
}
return result
except Exception as e:
log(f"Error parsing URL: {e}", "ERROR")
return None
def check_dns_resolution(domain):
try:
resolver = dns.resolver.Resolver()
resolver.timeout = 2
resolver.lifetime = 2
try:
answers = resolver.resolve(domain, 'A')
return str(answers[0])
        except Exception:
try:
answers = resolver.resolve(domain, 'AAAA')
return str(answers[0])
            except Exception:
pass
return None
    except Exception:
return None
def find_new_domain(input_url, output_file=None, verbose=True, json_output=False):
log_buffer = []
original_info = parse_url(input_url)
if not original_info:
log(f"Could not parse original URL: {input_url}", "ERROR")
if json_output:
return {'full_url': input_url, 'domain': None}
return None
log(f"Starting analysis for: {original_info['full_domain']}")
orig_ip = check_dns_resolution(original_info['full_domain'])
if orig_ip:
log(f"Original domain resolves to: {orig_ip}", "SUCCESS")
else:
log(f"Original domain does not resolve to an IP address", "WARNING")
headers = get_headers()
new_domains = []
redirects = []
final_url = None
final_domain_info = None
url_to_test_in_loop = None
for protocol in ['https://', 'http://']:
try:
url_to_test_in_loop = f"{protocol}{original_info['full_domain']}"
log(f"Testing connectivity to {url_to_test_in_loop}")
redirect_chain = []
current_url = url_to_test_in_loop
max_redirects = 10
redirect_count = 0
while redirect_count < max_redirects:
with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
response = client.get(current_url, headers=headers)
redirect_info = {'url': current_url, 'status_code': response.status_code}
redirect_chain.append(redirect_info)
log(f"Request to {current_url} - Status: {response.status_code}")
if response.status_code in (301, 302, 303, 307, 308):
if 'location' in response.headers:
next_url = response.headers['location']
if next_url.startswith('/'):
parsed_current = urlparse(current_url)
next_url = f"{parsed_current.scheme}://{parsed_current.netloc}{next_url}"
log(f"Redirect found: {next_url} (Status: {response.status_code})")
current_url = next_url
redirect_count += 1
redirect_domain_info_val = parse_url(next_url)
if redirect_domain_info_val and redirect_domain_info_val['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'})
else:
log(f"Redirect status code but no Location header", "WARNING")
break
else:
break
if redirect_chain:
final_url = redirect_chain[-1]['url']
final_domain_info = parse_url(final_url)
redirects.extend(redirect_chain)
log(f"Final URL after redirects: {final_url}", "SUCCESS")
if final_domain_info and final_domain_info['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': final_domain_info['full_domain'], 'url': final_url, 'source': 'final_url'})
final_status = redirect_chain[-1]['status_code'] if redirect_chain else None
if final_status and final_status < 400 and final_status != 403:
break
if final_status == 403 and redirect_chain and len(redirect_chain) > 1:
log(f"Got 403 Forbidden, but captured {len(redirect_chain)-1} redirects before that", "SUCCESS")
break
except httpx.RequestError as e:
log(f"Error connecting to {protocol}{original_info['full_domain']}: {str(e)}", "ERROR")
url_for_auto_redirect = input_url
if url_to_test_in_loop:
url_for_auto_redirect = url_to_test_in_loop
elif original_info and original_info.get('url'):
url_for_auto_redirect = original_info['url']
if not redirects or not new_domains:
log("Trying alternate method with automatic redirect following")
try:
with httpx.Client(verify=False, follow_redirects=True, timeout=5) as client:
response_auto = client.get(url_for_auto_redirect, headers=headers)
log(f"Connected with auto-redirects: Status {response_auto.status_code}")
if response_auto.history:
log(f"Found {len(response_auto.history)} redirects with auto-following", "SUCCESS")
for r_hist in response_auto.history:
redirect_info_auto = {'url': str(r_hist.url), 'status_code': r_hist.status_code}
redirects.append(redirect_info_auto)
log(f"Auto-redirect: {r_hist.url} (Status: {r_hist.status_code})")
final_url = str(response_auto.url)
final_domain_info = parse_url(final_url)
for redirect_hist_item in response_auto.history:
redirect_domain_val = parse_url(str(redirect_hist_item.url))
if redirect_domain_val and original_info and redirect_domain_val['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': redirect_domain_val['full_domain'], 'url': str(redirect_hist_item.url), 'source': 'auto-redirect'})
current_final_url_info = parse_url(str(response_auto.url))
if current_final_url_info and original_info and current_final_url_info['full_domain'] != original_info['full_domain']:
is_already_added = any(d['domain'] == current_final_url_info['full_domain'] and d['source'] == 'auto-redirect' for d in new_domains)
if not is_already_added:
new_domains.append({'domain': current_final_url_info['full_domain'], 'url': str(response_auto.url), 'source': 'final_url_auto'})
final_url = str(response_auto.url)
final_domain_info = current_final_url_info
log(f"Final URL from auto-redirect: {final_url}", "SUCCESS")
except httpx.RequestError as e:
log(f"Error with auto-redirect attempt: {str(e)}", "ERROR")
except NameError:
log(f"Error: URL for auto-redirect attempt was not defined.", "ERROR")
unique_domains = []
seen_domains = set()
for domain_info_item in new_domains:
if domain_info_item['domain'] not in seen_domains:
seen_domains.add(domain_info_item['domain'])
unique_domains.append(domain_info_item)
if not final_url:
final_url = input_url
if not final_domain_info:
final_domain_info = original_info
if final_domain_info:
parsed_final_url_info = parse_url(final_url)
if parsed_final_url_info:
final_url = parsed_final_url_info['url']
final_domain_info = parsed_final_url_info
else:
final_domain_info = original_info
final_url = original_info['url'] if original_info else input_url
results_original_domain = original_info['full_domain'] if original_info else None
results_final_domain_tld = final_domain_info['domain'] if final_domain_info and 'domain' in final_domain_info else None
results = {
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'original_url': input_url,
'original_domain': results_original_domain,
'original_ip': orig_ip,
'new_domains': unique_domains,
'redirects': redirects,
'log': log_buffer
}
simplified_json_output = {'full_url': final_url, 'domain': results_final_domain_tld}
if verbose:
log(f"DEBUG - Simplified output: {simplified_json_output}", "INFO")
if output_file:
try:
with open(output_file, 'w', encoding='utf-8') as f:
json.dump(results, f, indent=2, ensure_ascii=False)
log(f"Results saved to {output_file}", "SUCCESS")
except Exception as e:
log(f"Error writing to output file: {str(e)}", "ERROR")
if json_output:
return simplified_json_output
else:
return results
def update_site_entry(site_name: str, all_domains_data: dict):
site_config = all_domains_data.get(site_name, {})
log(f"Processing site: {site_name}", "INFO")
if not site_config.get('full_url'):
log(f"Site {site_name} has no full_url in config. Skipping.", "WARNING")
return False
current_full_url = site_config.get('full_url')
current_domain_tld = site_config.get('domain')
found_domain_info = find_new_domain(current_full_url, verbose=False, json_output=True)
if found_domain_info and found_domain_info.get('full_url') and found_domain_info.get('domain'):
new_full_url = found_domain_info['full_url']
new_domain_tld = found_domain_info['domain']
if new_full_url != current_full_url or new_domain_tld != current_domain_tld:
log(f"Update found for {site_name}: URL '{current_full_url}' -> '{new_full_url}', TLD '{current_domain_tld}' -> '{new_domain_tld}'", "SUCCESS")
updated_entry = site_config.copy()
updated_entry['full_url'] = new_full_url
updated_entry['domain'] = new_domain_tld
if new_domain_tld != current_domain_tld :
updated_entry['old_domain'] = current_domain_tld if current_domain_tld else ""
updated_entry['time_change'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
all_domains_data[site_name] = updated_entry
return True
else:
log(f"No changes detected for {site_name}.", "INFO")
return False
else:
log(f"Could not reliably find new domain info for {site_name} from URL: {current_full_url}. No search fallback.", "WARNING")
return False
def main():
log("Starting domain update script...")
all_domains_data = load_json_data(JSON_FILE_PATH)
if not all_domains_data:
log("Cannot proceed: Domain data is missing or could not be loaded.", "ERROR")
log("Script finished.")
return
any_updates_made = False
for site_name_key in list(all_domains_data.keys()):
if update_site_entry(site_name_key, all_domains_data):
any_updates_made = True
print("\n")
if any_updates_made:
save_json_data(JSON_FILE_PATH, all_domains_data)
log("Update complete. Some entries were modified.", "SUCCESS")
else:
log("Update complete. No domains were modified.", "INFO")
log("Script finished.")
if __name__ == "__main__":
main()
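
For orientation, a minimal sketch of how the helpers above compose when called by hand (illustrative only; it assumes the script is importable as `domain_update`, and the URLs are examples):

```python
# Illustrative use of the helpers defined above (not part of the script itself).
from domain_update import parse_url, check_dns_resolution, find_new_domain

info = parse_url("streamingunity.bid")            # https:// is prepended automatically
print(info["full_domain"], info["suffix"])        # -> streamingunity.bid bid

print(check_dns_resolution(info["full_domain"]))  # first A/AAAA record, or None

# json_output=True yields the simplified {'full_url': ..., 'domain': ...} shape
result = find_new_domain("https://streamingunity.bid/", verbose=False, json_output=True)
print(result["full_url"], result["domain"])
```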

62
.github/.domain/domains.json vendored Normal file

@ -0,0 +1,62 @@
{
"1337xx": {
"domain": "to",
"full_url": "https://www.1337xx.to/",
"old_domain": "to",
"time_change": "2025-03-19 12:20:19"
},
"cb01new": {
"domain": "life",
"full_url": "https://cb01net.life/",
"old_domain": "download",
"time_change": "2025-06-01 01:02:16"
},
"animeunity": {
"domain": "so",
"full_url": "https://www.animeunity.so/",
"old_domain": "so",
"time_change": "2025-03-19 12:20:23"
},
"animeworld": {
"domain": "ac",
"full_url": "https://www.animeworld.ac/",
"old_domain": "ac",
"time_change": "2025-03-21 12:20:27"
},
"guardaserie": {
"domain": "meme",
"full_url": "https://guardaserie.meme/",
"old_domain": "meme",
"time_change": "2025-03-19 12:20:24"
},
"ddlstreamitaly": {
"domain": "co",
"full_url": "https://ddlstreamitaly.co/",
"old_domain": "co",
"time_change": "2025-03-19 12:20:26"
},
"streamingwatch": {
"domain": "org",
"full_url": "https://www.streamingwatch.org/",
"old_domain": "org",
"time_change": "2025-04-29 12:30:30"
},
"altadefinizione": {
"domain": "spa",
"full_url": "https://altadefinizione.spa/",
"old_domain": "locker",
"time_change": "2025-05-26 23:22:45"
},
"streamingcommunity": {
"domain": "bid",
"full_url": "https://streamingunity.bid/",
"old_domain": "bio",
"time_change": "2025-06-03 15:27:02"
},
"altadefinizionegratis": {
"domain": "cc",
"full_url": "https://altadefinizionegratis.cc/",
"old_domain": "icu",
"time_change": "2025-06-02 10:35:25"
}
}
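
Each entry follows the same shape, so a quick sanity check over the file is straightforward (sketch; the path matches the repository layout shown above):

```python
# Sketch: loading domains.json and listing the tracked sites.
import json

with open(".github/.domain/domains.json", encoding="utf-8") as f:
    domains = json.load(f)

for name, site in domains.items():
    print(f"{name:24} {site['full_url']} (last change: {site['time_change']})")
```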

View File

@ -38,14 +38,11 @@ body {
flex-direction: column;
}
header {
background-color: var(--header-bg);
backdrop-filter: blur(10px);
position: fixed;
width: 100%;
padding: 15px 0;
z-index: 1000;
box-shadow: 0 2px 12px var(--shadow-color);
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex: 1;
}
.header-container {
@ -88,13 +85,6 @@ header {
font-size: 1.1rem;
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex: 1;
}
.site-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
@ -166,78 +156,6 @@ header {
color: var(--accent-color);
}
.site-content {
text-align: center;
width: 100%;
}
.domain {
color: var(--text-color);
opacity: 0.8;
font-size: 0.9rem;
margin-bottom: 1.5rem;
word-break: break-all;
}
.site-item a {
margin-top: 1rem;
background: linear-gradient(135deg, var(--primary-color), var(--secondary-color));
color: white;
text-decoration: none;
font-weight: 500;
padding: 12px 28px;
border-radius: 8px;
width: fit-content;
margin: 0 auto;
display: flex;
align-items: center;
gap: 8px;
}
.site-item a:hover {
opacity: 0.9;
transform: translateY(-2px);
}
.site-title {
opacity: 0;
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background: rgba(0, 0, 0, 0.8);
padding: 10px 20px;
border-radius: 8px;
transition: opacity 0.3s ease;
color: white;
font-size: 1.2rem;
text-align: center;
width: 80%;
pointer-events: none;
z-index: 2;
}
.site-item:hover .site-title {
opacity: 1;
}
.site-item::after {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(0, 0, 0, 0.5);
opacity: 0;
transition: opacity 0.3s ease;
pointer-events: none;
}
.site-item:hover::after {
opacity: 1;
}
.site-info {
display: flex;
flex-direction: column;
@ -264,6 +182,211 @@ header {
opacity: 1;
}
.site-status {
position: absolute;
top: 10px;
right: 10px;
width: 12px;
height: 12px;
border-radius: 50%;
background: #4CAF50;
}
.site-status.offline {
background: #f44336;
}
.status-indicator {
position: fixed;
top: 20px;
right: 20px;
background: var(--card-background);
border: 1px solid var(--border-color);
border-radius: 12px;
padding: 15px 20px;
box-shadow: 0 4px 20px var(--shadow-color);
z-index: 1001;
min-width: 280px;
max-width: 400px;
transition: all 0.3s ease;
}
.status-indicator.hidden {
opacity: 0;
transform: translateY(-20px);
pointer-events: none;
}
.status-header {
display: flex;
align-items: center;
gap: 10px;
margin-bottom: 15px;
font-weight: 600;
color: var(--primary-color);
}
.status-icon {
width: 20px;
height: 20px;
border: 2px solid var(--primary-color);
border-radius: 50%;
border-top-color: transparent;
animation: spin 1s linear infinite;
}
.status-icon.ready {
border: none;
background: #4CAF50;
animation: none;
position: relative;
}
.status-icon.ready::after {
content: '✓';
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white;
font-size: 12px;
font-weight: bold;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
.status-text {
color: var(--text-color);
font-size: 0.9rem;
margin-bottom: 10px;
}
.checking-sites {
max-height: 200px;
overflow-y: auto;
background: var(--background-color);
border-radius: 8px;
padding: 10px;
border: 1px solid var(--border-color);
}
.checking-site {
display: flex;
align-items: center;
    justify-content: space-between;
gap: 10px;
padding: 6px 8px;
margin-bottom: 4px;
border-radius: 6px;
background: var(--card-background);
font-size: 0.8rem;
color: var(--text-color);
transition: all 0.2s ease;
}
.checking-site.completed {
opacity: 0.6;
background: var(--card-hover);
}
.checking-site.online {
border-left: 3px solid #4CAF50;
}
.checking-site.offline {
border-left: 3px solid #f44336;
}
.checking-site .site-name {
flex: 1;
font-weight: 500;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.checking-site .site-status-icon {
width: 12px;
height: 12px;
border-radius: 50%;
flex-shrink: 0;
}
.checking-site .site-status-icon.checking {
background: var(--primary-color);
animation: pulse 1s infinite;
}
.checking-site .site-status-icon.online {
background: #4CAF50;
}
.checking-site .site-status-icon.offline {
background: #f44336;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.progress-bar {
width: 100%;
height: 6px;
background: var(--background-color);
border-radius: 3px;
overflow: hidden;
margin-top: 10px;
}
.progress-fill {
height: 100%;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
width: 0%;
transition: width 0.3s ease;
border-radius: 3px;
}
.loader {
width: 48px;
height: 48px;
border: 3px solid var(--primary-color);
border-bottom-color: transparent;
border-radius: 50%;
display: inline-block;
position: relative;
box-sizing: border-box;
animation: rotation 1s linear infinite;
}
.loader::after {
content: '';
position: absolute;
box-sizing: border-box;
left: 0;
top: 0;
width: 48px;
height: 48px;
border-radius: 50%;
border: 3px solid transparent;
border-bottom-color: var(--accent-color);
animation: rotationBack 0.5s linear infinite;
transform: rotate(45deg);
}
@keyframes rotation {
0% { transform: rotate(0deg) }
100% { transform: rotate(360deg) }
}
@keyframes rotationBack {
0% { transform: rotate(0deg) }
100% { transform: rotate(-360deg) }
}
footer {
background: var(--card-background);
border-top: 1px solid var(--border-color);
@ -355,26 +478,6 @@ footer {
transform: scale(1.2);
}
.github-stats {
display: flex;
gap: 10px;
margin-top: 10px;
font-size: 0.8rem;
}
.github-badge {
background-color: var(--background-color);
padding: 4px 8px;
border-radius: 4px;
display: flex;
align-items: center;
gap: 4px;
}
.github-badge i {
color: var(--accent-color);
}
.footer-description {
margin-top: 15px;
font-size: 0.9rem;
@ -383,103 +486,13 @@ footer {
line-height: 1.5;
}
.update-info {
text-align: center;
margin-top: 30px;
padding-top: 30px;
border-top: 1px solid var(--border-color);
}
.update-note {
color: var(--accent-color);
font-size: 0.9rem;
opacity: 0.9;
}
.theme-toggle {
position: relative;
top: unset;
right: unset;
z-index: 1;
}
.theme-toggle input {
display: none;
}
.theme-toggle label {
cursor: pointer;
padding: 8px;
background: var(--background-color);
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
box-shadow: 0 0 10px var(--shadow-color);
border: 1px solid var(--border-color);
transition: all 0.3s ease;
}
.theme-toggle label:hover {
border-color: var(--primary-color);
transform: translateY(-2px);
}
.theme-toggle .fa-sun {
display: none;
color: #ffd700;
}
.theme-toggle .fa-moon {
color: #8c52ff;
}
.theme-toggle input:checked ~ label .fa-sun {
display: block;
}
.theme-toggle input:checked ~ label .fa-moon {
display: none;
}
.loader {
width: 48px;
height: 48px;
border: 3px solid var(--primary-color);
border-bottom-color: transparent;
border-radius: 50%;
display: inline-block;
position: relative;
box-sizing: border-box;
animation: rotation 1s linear infinite;
}
.loader::after {
content: '';
position: absolute;
box-sizing: border-box;
left: 0;
top: 0;
width: 48px;
height: 48px;
border-radius: 50%;
border: 3px solid transparent;
border-bottom-color: var(--accent-color);
animation: rotationBack 0.5s linear infinite;
transform: rotate(45deg);
}
@keyframes rotation {
0% { transform: rotate(0deg) }
100% { transform: rotate(360deg) }
}
@keyframes rotationBack {
0% { transform: rotate(0deg) }
100% { transform: rotate(-360deg) }
}
/* Improved Responsiveness */
/* Responsiveness */
@media (max-width: 768px) {
.site-grid {
grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
@ -496,11 +509,7 @@ footer {
grid-template-columns: 1fr;
gap: 20px;
padding: 15px;
}
.theme-toggle {
top: 10px;
right: 10px;
text-align: center;
}
.header-container {
@ -517,27 +526,6 @@ footer {
width: 100%;
justify-content: center;
}
}
@media (max-width: 480px) {
.site-grid {
grid-template-columns: 1fr;
}
.site-item {
min-height: 220px;
}
.container {
padding: 10px;
}
}
@media (max-width: 768px) {
.footer-content {
grid-template-columns: 1fr;
text-align: center;
}
.footer-title::after {
left: 50%;
@ -557,83 +545,16 @@ footer {
}
}
.time-change {
color: var(--text-color);
opacity: 0.7;
font-size: 0.85rem;
margin-bottom: 0.5rem;
word-break: break-all;
}
@media (max-width: 480px) {
.site-grid {
grid-template-columns: 1fr;
}
.label {
color: var(--accent-color);
font-weight: 500;
}
.controls-container {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 20px;
padding: 15px 20px;
background: var(--card-background);
border-radius: 12px;
border: 1px solid var(--border-color);
}
.grid-controls {
display: flex;
align-items: center;
gap: 10px;
}
.grid-controls label {
color: var(--text-color);
font-weight: 500;
}
.grid-controls select {
padding: 8px 12px;
border-radius: 8px;
border: 1px solid var(--border-color);
background: var(--background-color);
color: var(--text-color);
cursor: pointer;
transition: all 0.3s ease;
}
.grid-controls select:hover {
border-color: var(--primary-color);
}
.sites-stats {
display: flex;
gap: 20px;
align-items: center;
}
.total-sites, .last-update-global {
display: flex;
align-items: center;
gap: 8px;
color: var(--text-color);
font-size: 0.9rem;
}
.total-sites i, .last-update-global i {
color: var(--primary-color);
}
.site-status {
position: absolute;
top: 10px;
right: 10px;
width: 12px;
height: 12px;
border-radius: 50%;
background: #4CAF50;
}
.site-status.offline {
background: #f44336;
.site-item {
min-height: 220px;
}
.container {
padding: 10px;
}
}

View File

@ -1,32 +1,82 @@
document.documentElement.setAttribute('data-theme', 'dark');
function initGridControls() {
const gridSize = document.getElementById('grid-size');
const siteGrid = document.querySelector('.site-grid');
gridSize.addEventListener('change', function() {
switch(this.value) {
case 'small':
siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(200px, 1fr))';
break;
case 'medium':
siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(300px, 1fr))';
break;
case 'large':
siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(400px, 1fr))';
break;
}
localStorage.setItem('preferredGridSize', this.value);
});
let statusIndicator = null;
let checkingSites = new Map();
let totalSites = 0;
let completedSites = 0;
const savedSize = localStorage.getItem('preferredGridSize');
if (savedSize) {
gridSize.value = savedSize;
gridSize.dispatchEvent(new Event('change'));
function createStatusIndicator() {
statusIndicator = document.createElement('div');
statusIndicator.className = 'status-indicator';
statusIndicator.innerHTML = `
<div class="status-header">
<div class="status-icon"></div>
<span class="status-title">Loading Sites...</span>
</div>
<div class="status-text">Initializing site checks...</div>
<div class="progress-bar">
<div class="progress-fill"></div>
</div>
<div class="checking-sites"></div>
`;
document.body.appendChild(statusIndicator);
return statusIndicator;
}
function updateStatusIndicator(status, text, progress = 0) {
if (!statusIndicator) return;
const statusIcon = statusIndicator.querySelector('.status-icon');
const statusTitle = statusIndicator.querySelector('.status-title');
const statusText = statusIndicator.querySelector('.status-text');
const progressFill = statusIndicator.querySelector('.progress-fill');
statusTitle.textContent = status;
statusText.textContent = text;
progressFill.style.width = `${progress}%`;
if (status === 'Ready') {
statusIcon.classList.add('ready');
setTimeout(() => {
statusIndicator.classList.add('hidden');
setTimeout(() => statusIndicator.remove(), 300);
}, 2000);
}
}
async function checkSiteStatus(url) {
function addSiteToCheck(siteName, siteUrl) {
if (!statusIndicator) return;
const checkingSitesContainer = statusIndicator.querySelector('.checking-sites');
const siteElement = document.createElement('div');
siteElement.className = 'checking-site';
siteElement.innerHTML = `
<span class="site-name">${siteName}</span>
<div class="site-status-icon checking"></div>
`;
checkingSitesContainer.appendChild(siteElement);
checkingSites.set(siteName, siteElement);
}
function updateSiteStatus(siteName, isOnline) {
const siteElement = checkingSites.get(siteName);
if (!siteElement) return;
const statusIcon = siteElement.querySelector('.site-status-icon');
statusIcon.classList.remove('checking');
statusIcon.classList.add(isOnline ? 'online' : 'offline');
siteElement.classList.add('completed', isOnline ? 'online' : 'offline');
completedSites++;
const progress = (completedSites / totalSites) * 100;
updateStatusIndicator(
'Checking Sites...',
`Checked ${completedSites}/${totalSites} sites`,
progress
);
}
async function checkSiteStatus(url, siteName) {
try {
console.log(`Checking status for: ${url}`);
const controller = new AbortController();
@ -46,66 +96,75 @@ async function checkSiteStatus(url) {
const isOnline = response.type === 'opaque';
console.log(`Site ${url} is ${isOnline ? 'online' : 'offline'} (Type: ${response.type})`);
if (siteName) {
updateSiteStatus(siteName, isOnline);
}
return isOnline;
} catch (error) {
console.log(`Error checking ${url}:`, error.message);
if (siteName) {
updateSiteStatus(siteName, false);
}
return false;
}
}
const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co';
const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE';
const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json';
async function loadSiteData() {
try {
console.log('Starting to load site data...');
console.log('Starting to load site data from GitHub...');
createStatusIndicator();
updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0);
const siteList = document.getElementById('site-list');
siteList.innerHTML = '<div class="loader"></div>';
const headers = {
'accept': '*/*',
'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'apikey': supabaseKey,
'authorization': `Bearer ${supabaseKey}`,
'content-type': 'application/json',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'range': '0-9'
};
console.log('Fetching from Supabase with headers:', headers);
const response = await fetch(`${supabaseUrl}/rest/v1/public?select=*`, {
method: 'GET',
headers: headers
});
console.log(`Fetching from GitHub: ${domainsJsonUrl}`);
const response = await fetch(domainsJsonUrl);
if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
const data = await response.json();
const configSite = await response.json(); // Directly get the site data object
siteList.innerHTML = ''; if (data && data.length > 0) {
console.log('Raw data from Supabase:', data);
const configSite = data[0].data;
console.log('Parsed config site:', configSite);
let totalSites = Object.keys(configSite).length;
siteList.innerHTML = '';
if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object
totalSites = Object.keys(configSite).length;
completedSites = 0;
let latestUpdate = new Date(0);
document.getElementById('sites-count').textContent = totalSites;
updateStatusIndicator('Checking Sites...', `Starting checks for ${totalSites} sites...`, 0);
Object.entries(configSite).forEach(([siteName, site]) => {
addSiteToCheck(siteName, site.full_url);
});
for (const siteName in configSite) {
const site = configSite[siteName];
const statusChecks = Object.entries(configSite).map(async ([siteName, site]) => {
const isOnline = await checkSiteStatus(site.full_url, siteName);
return { siteName, site, isOnline };
});
const results = await Promise.all(statusChecks);
updateStatusIndicator('Ready', 'All sites checked successfully!', 100);
results.forEach(({ siteName, site, isOnline }) => {
const siteItem = document.createElement('div');
siteItem.className = 'site-item';
siteItem.style.cursor = 'pointer';
// Add status indicator
const statusDot = document.createElement('div');
statusDot.className = 'site-status';
const isOnline = await checkSiteStatus(site.full_url);
if (!isOnline) statusDot.classList.add('offline');
siteItem.appendChild(statusDot);
// Update latest update time
const updateTime = new Date(site.time_change);
if (updateTime > latestUpdate) {
latestUpdate = updateTime;
@ -133,7 +192,9 @@ async function loadSiteData() {
oldDomain.className = 'old-domain';
oldDomain.innerHTML = `<i class="fas fa-history"></i> ${site.old_domain}`;
siteInfo.appendChild(oldDomain);
} siteItem.addEventListener('click', function() {
}
siteItem.addEventListener('click', function() {
window.open(site.full_url, '_blank', 'noopener,noreferrer');
});
@ -150,7 +211,7 @@ async function loadSiteData() {
siteItem.appendChild(siteTitle);
siteItem.appendChild(siteInfo);
siteList.appendChild(siteItem);
}
});
const formattedDate = latestUpdate.toLocaleDateString('it-IT', {
year: 'numeric',
@ -162,6 +223,7 @@ async function loadSiteData() {
document.getElementById('last-update-time').textContent = formattedDate;
} else {
siteList.innerHTML = '<div class="no-sites">No sites available</div>';
updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
}
} catch (error) {
console.error('Errore:', error);
@ -171,6 +233,10 @@ async function loadSiteData() {
<button onclick="loadSiteData()" class="retry-button">Riprova</button>
</div>
`;
if (statusIndicator) {
updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0);
statusIndicator.querySelector('.status-icon').style.background = '#f44336';
}
}
}
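
Since the page now reads the published domains.json straight from GitHub, the same data can be pulled from any client; for example, in Python (the URL is taken from the script above):

```python
# Sketch: fetching the same domains.json the page script loads.
import httpx

URL = ("https://raw.githubusercontent.com/Arrowar/StreamingCommunity/"
       "refs/heads/main/.github/.domain/domains.json")

data = httpx.get(URL, timeout=10).json()
for name, site in data.items():
    print(f"{name}: {site['full_url']}")
```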

50
.github/workflows/update_domain.yml vendored Normal file

@ -0,0 +1,50 @@
name: Update domains
on:
schedule:
- cron: "0 7-21 * * *"
workflow_dispatch:
jobs:
update-domains:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install dependencies
run: |
pip install httpx tldextract ua-generator dnspython
pip install --upgrade pip setuptools wheel
- name: Configure DNS
run: |
sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
cat /etc/resolv.conf
- name: Execute domain update script
run: python .github/.domain/domain_update.py
- name: Commit and push changes (if any)
run: |
git config --global user.name 'github-actions[bot]'
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
# Check if domains.json was modified
if ! git diff --quiet .github/.domain/domains.json; then
git add .github/.domain/domains.json
git commit -m "Automatic domain update [skip ci]"
echo "Changes committed. Attempting to push..."
git push
else
echo "No changes to .github/.domain/domains.json to commit."
fi
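
The commit step only fires when the updater actually modified domains.json; for illustration, the same gate expressed in Python:

```python
# Illustrative Python equivalent of the workflow's commit gate (the workflow
# itself uses plain git commands in a shell step).
import subprocess

def domains_json_changed() -> bool:
    # `git diff --quiet <path>` exits non-zero when the file has unstaged changes
    result = subprocess.run(
        ["git", "diff", "--quiet", ".github/.domain/domains.json"]
    )
    return result.returncode != 0

if domains_json_changed():
    print("domains.json changed: the workflow would commit and push")
else:
    print("no changes: nothing to commit")
```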

1
.gitignore vendored

@ -52,5 +52,4 @@ cmd.txt
bot_config.json
scripts.json
active_requests.json
domains.json
working_proxies.json

View File

@ -1,5 +1,5 @@
<p align="center">
<img src="https://i.ibb.co/v6RnT0wY/s2.jpg" alt="Project Logo" width="600"/>
<img src="https://i.ibb.co/v6RnT0wY/s2.jpg" alt="Project Logo" width="450"/>
</p>
<p align="center">
@ -35,7 +35,7 @@
<summary>📦 Installation</summary>
- 🔄 [Update Domains](#update-domains)
- 🌐 [Available Sites](https://arrowar.github.io/StreamingDirectory/)
- 🌐 [Available Sites](https://arrowar.github.io/StreamingCommunity/)
- 🛠️ [Installation](#installation)
- 📦 [PyPI Installation](#1-pypi-installation)
- 🔄 [Automatic Installation](#2-automatic-installation)
@ -470,7 +470,11 @@ To enable qBittorrent integration, follow the setup guide [here](https://github.
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8
"max_retry": 8,
"proxy": {
"http": "http://username:password@host:port",
"https": "https://username:password@host:port"
}
}
}
```
@ -478,6 +482,22 @@ To enable qBittorrent integration, follow the setup guide [here](https://github.
- `verify`: Verifies SSL certificates
- `timeout`: Maximum timeout (in seconds) for each request
- `max_retry`: Number of retry attempts per segment during M3U8 index download
- `proxy`: Proxy configuration for HTTP/HTTPS requests
* Set to empty string `""` to disable proxies (default)
* Example with authentication:
```json
"proxy": {
"http": "http://username:password@host:port",
"https": "https://username:password@host:port"
}
```
* Example without authentication:
```json
"proxy": {
"http": "http://host:port",
"https": "https://host:port"
}
```
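
As a reference for how such an entry might be consumed, a minimal sketch (not project code; the config loading and key conversion here are illustrative — the repository's own code passes a single proxy string to httpx, as the VideoSource diff further down shows):

```python
# Minimal sketch (assumption, not project API): applying the README's "proxy"
# entry to an httpx client. httpx expects scheme-pattern keys like "http://",
# so the plain "http"/"https" keys from config.json are converted here.
import json
import httpx

with open("config.json", encoding="utf-8") as f:
    requests_cfg = json.load(f)["REQUESTS"]

proxy_cfg = requests_cfg.get("proxy") or None  # "" disables proxies
proxies = (
    {f"{scheme}://": url for scheme, url in proxy_cfg.items()}
    if isinstance(proxy_cfg, dict) else None
)

client = httpx.Client(
    proxies=proxies,   # older httpx API; newer releases use proxy= / mounts=
    verify=requests_cfg["verify"],
    timeout=requests_cfg["timeout"],
)
```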
</details>
<details>
@ -728,26 +748,24 @@ The `run-container` command mounts also the `config.json` file, so any change to
The bot was created to replace terminal commands and allow interaction via Telegram. Each download runs within a screen session, enabling multiple downloads to run simultaneously.
To run the bot in the background, simply start it inside a screen session and then press Ctrl + A, followed by D, to detach from the session without stopping the bot.
</details>
<details>
<summary>🤖 Bot Commands</summary>
Command Functions:
🔹 /start Starts a new search for a download. This command performs the same operations as manually running the script in the terminal with test_run.py.
🔹 /list Displays the status of active downloads, with options to:
- Stop an incorrect download using /stop <ID>
- View the real-time output of a download using /screen <ID>
Stop an incorrect download using /stop <ID>.
View the real-time output of a download using /screen <ID>.
⚠ Warning: If a download is interrupted, incomplete files may remain in the folder specified in config.json. These files must be deleted manually to avoid storage or management issues.
</details>
<details>
<summary>🔧 Environment Setup</summary>
🛠 Configuration: Currently, the bot's settings are stored in the config.json file, which is located in the same directory as the telegram_bot.py script.
Create an `.env` file with:
## .env Example:
You need to create an .env file and enter your Telegram token and user ID, so that only a single authorized user can use the bot.
```
TOKEN_TELEGRAM=IlTuo2131TOKEN$12D3Telegram
@ -799,4 +817,4 @@ Tool per guardare o scaricare film dalla piattaforma StreamingCommunity.
# Disclaimer
This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.

View File

@ -0,0 +1,141 @@
# 05.07.24
# NOTE: NOT USED
import re
import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
# Variable
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
def __init__(self, url: str):
"""
Sets up the video source with the provided URL.
Parameters:
- url (str): The URL of the video.
"""
self.url = url
self.redirect_url = None
self.maxstream_url = None
self.m3u8_url = None
self.headers = {'user-agent': get_userAgent()}
def get_redirect_url(self):
"""
Sends a request to the initial URL and extracts the redirect URL.
"""
try:
response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
# Extract the redirect URL from the HTML
soup = BeautifulSoup(response.text, "html.parser")
self.redirect_url = soup.find("div", id="iframen1").get("data-src")
logging.info(f"Redirect URL: {self.redirect_url}")
return self.redirect_url
except Exception as e:
logging.error(f"Error parsing HTML: {e}")
raise
def get_maxstream_url(self):
"""
Sends a request to the redirect URL and extracts the Maxstream URL.
"""
try:
response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
# Extract the Maxstream URL from the HTML
soup = BeautifulSoup(response.text, "html.parser")
maxstream_url = soup.find("a")
if maxstream_url is None:
# If no anchor tag is found, try the alternative method
logging.warning("Anchor tag not found. Trying the alternative method.")
headers = {
'origin': 'https://stayonline.pro',
'user-agent': get_userAgent(),
'x-requested-with': 'XMLHttpRequest',
}
# Make request to stayonline api
data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data)
response.raise_for_status()
uprot_url = response.json()['data']['value']
                # Retry getting maxstream url
response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
maxstream_url = soup.find("a").get("href")
else:
maxstream_url = maxstream_url.get("href")
self.maxstream_url = maxstream_url
logging.info(f"Maxstream URL: {self.maxstream_url}")
return self.maxstream_url
except Exception as e:
logging.error(f"Error during the request: {e}")
raise
def get_m3u8_url(self):
"""
Sends a request to the Maxstream URL and extracts the .m3u8 file URL.
"""
try:
response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
# Iterate over all script tags in the HTML
for script in soup.find_all("script"):
if "eval(function(p,a,c,k,e,d)" in script.text:
# Execute the script using
data_js = jsbeautifier.beautify(script.text)
# Extract the .m3u8 URL from the script's output
match = re.search(r'sources:\s*\[\{\s*src:\s*"([^"]+)"', data_js)
if match:
self.m3u8_url = match.group(1)
logging.info(f"M3U8 URL: {self.m3u8_url}")
break
else:
logging.error("Failed to find M3U8 URL: No match found")
return self.m3u8_url
except Exception as e:
logging.error(f"Error executing the Node.js script: {e}")
raise
def get_playlist(self):
"""
Executes the entire flow to obtain the final .m3u8 file URL.
"""
self.get_redirect_url()
self.get_maxstream_url()
return self.get_m3u8_url()
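
A typical call site just chains the three steps through `get_playlist()`; a standalone sketch (the URL is a placeholder, not a real title page):

```python
# Sketch: resolving the final .m3u8 URL with the VideoSource class above.
source = VideoSource("https://example.org/watch/some-title")  # placeholder URL
m3u8_url = source.get_playlist()  # redirect -> maxstream -> m3u8 extraction
print(m3u8_url)
```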

View File

@ -5,9 +5,9 @@ import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
from curl_cffi import requests
# Internal utilities
@ -28,7 +28,6 @@ class VideoSource:
- url (str): The URL of the video source.
"""
self.headers = get_headers()
self.client = httpx.Client()
self.url = url
def make_request(self, url: str) -> str:
@ -42,8 +41,10 @@ class VideoSource:
- str: The response content if successful, None otherwise.
"""
try:
response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
response.raise_for_status()
response = requests.get(url, headers=self.headers, timeout=MAX_TIMEOUT, impersonate="chrome110")
if response.status_code >= 400:
logging.error(f"Request failed with status code: {response.status_code}")
return None
return response.text
except Exception as e:

View File

@ -1,6 +1,6 @@
# 01.03.24
import sys
import time
import logging
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
@ -24,7 +24,7 @@ console = Console()
class VideoSource:
def __init__(self, url: str, is_series: bool, media_id: int = None):
def __init__(self, url: str, is_series: bool, media_id: int = None, proxy: str = None):
"""
Initialize video source for streaming site.
@ -35,9 +35,11 @@ class VideoSource:
"""
self.headers = {'user-agent': get_userAgent()}
self.url = url
self.proxy = proxy
self.is_series = is_series
self.media_id = media_id
self.iframe_src = None
self.window_parameter = None
def get_iframe(self, episode_id: int) -> None:
"""
@ -55,7 +57,7 @@ class VideoSource:
}
try:
response = httpx.get(f"{self.url}/iframe/{self.media_id}", params=params, timeout=MAX_TIMEOUT)
response = httpx.get(f"{self.url}/iframe/{self.media_id}", headers=self.headers, params=params, timeout=MAX_TIMEOUT, proxy=self.proxy)
response.raise_for_status()
# Parse response with BeautifulSoup to get iframe source
@ -81,6 +83,7 @@ class VideoSource:
self.window_video = WindowVideo(converter.get('video'))
self.window_streams = StreamsCollection(converter.get('streams'))
self.window_parameter = WindowParameter(converter.get('masterPlaylist'))
time.sleep(0.5)
except Exception as e:
logging.error(f"Error parsing script: {e}")
@ -107,41 +110,45 @@ class VideoSource:
# Parse script to get video information
self.parse_script(script_text=script)
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
console.print("[yellow]This content will be available soon![/yellow]")
return
logging.error(f"Error getting content: {e}")
raise
except Exception as e:
logging.error(f"Error getting content: {e}")
raise
def get_playlist(self) -> str:
def get_playlist(self) -> str | None:
"""
Generate authenticated playlist URL.
Returns:
str: Fully constructed playlist URL with authentication parameters
str | None: Fully constructed playlist URL with authentication parameters, or None if content unavailable
"""
if not self.window_parameter:
return None
params = {}
# Add 'h' parameter if video quality is 1080p
if self.canPlayFHD:
params['h'] = 1
# Parse the original URL
parsed_url = urlparse(self.window_parameter.url)
query_params = parse_qs(parsed_url.query)
# Check specifically for 'b=1' in the query parameters
if 'b' in query_params and query_params['b'] == ['1']:
params['b'] = 1
# Add authentication parameters (token and expiration)
params.update({
"token": self.window_parameter.token,
"expires": self.window_parameter.expires
})
# Build the updated query string
query_string = urlencode(params)
# Construct the new URL with updated query parameters
return urlunparse(parsed_url._replace(query=query_string))
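
The query rebuilding above is the standard urllib pattern; a self-contained sketch with dummy token values:

```python
# Self-contained sketch of the playlist-URL construction, with dummy values.
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

master_url = "https://vixcloud.example/playlist/123?b=1"  # placeholder URL
parsed = urlparse(master_url)

params = {}
if parse_qs(parsed.query).get("b") == ["1"]:
    params["b"] = 1
params.update({"token": "abc123", "expires": "1760000000"})  # dummy auth values

print(urlunparse(parsed._replace(query=urlencode(params))))
# https://vixcloud.example/playlist/123?b=1&token=abc123&expires=1760000000
```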

View File

@ -21,10 +21,10 @@ from .title import download_title
# Variable
indice = 3
_useFor = "film_serie"
_useFor = "Torrent"
_priority = 0
_engineDownload = "tor"
_deprecate = False
_engineDownload = "Torrent"
_deprecate = True
console = Console()
msg = Prompt()
@ -62,7 +62,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
download_title(select_title)
else:

View File

@ -24,10 +24,10 @@ from .series import download_series
# Variable
indice = 2
_useFor = "film_serie"
_useFor = "Film_&_Serie"
_priority = 0
_engineDownload = "hls"
_deprecate = True
_deprecate = False
msg = Prompt()
console = Console()
@ -110,7 +110,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
bot = get_bot_instance()
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@ -1,6 +1,7 @@
# 16.03.25
import os
import re
# External library
@ -56,51 +57,38 @@ def download_film(select_title: MediaItem) -> str:
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
# Extract mostraguarda link
# Extract mostraguarda URL
try:
response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
response.raise_for_status()
except Exception as e:
console.print(f"[red]Error fetching the page: {e}")
if site_constant.TELEGRAM_BOT:
bot.send_message(f"ERRORE\n\nErrore durante il recupero della pagina.\n\n{e}", None)
return None
soup = BeautifulSoup(response.text, 'html.parser')
iframes = soup.find_all('iframe')
mostraguarda = iframes[0]['src']
# Create mostraguarda url
soup = BeautifulSoup(response.text, "html.parser")
iframe_tag = soup.find_all("iframe")
url_mostraGuarda = iframe_tag[0].get('data-src')
if not url_mostraGuarda:
console.print("Error: data-src attribute not found in iframe.")
if site_constant.TELEGRAM_BOT:
bot.send_message(f"ERRORE\n\nErrore: attributo data-src non trovato nell'iframe", None)
except Exception as e:
console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda")
return None
# Extract supervideo URL
supervideo_url = None
try:
response = httpx.get(url_mostraGuarda, headers=get_headers(), timeout=10)
response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+'
supervideo_match = re.search(pattern, response.text)
supervideo_url = 'https:' + supervideo_match.group(0)
except Exception as e:
console.print(f"[red]Error fetching mostraguarda link: {e}")
console.print("[yellow]Missing access credentials. This part of the code is still under development.")
if site_constant.TELEGRAM_BOT:
bot.send_message(f"ERRORE\n\nErrore durante il recupero del link mostra/guarda.\n\n{e}", None)
bot.send_message(f"ERRORE\n\nCredenziali di accesso mancanti.\nQuesta parte del codice è ancora in fase di sviluppo.", None)
console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL")
console.print("[yellow]This content will be available soon![/yellow]")
return None
    # Create supervideo URL
soup = BeautifulSoup(response.text, "html.parser")
player_links = soup.find("ul", class_="_player-mirrors")
player_items = player_links.find_all("li")
supervideo_url = "https:" + player_items[0].get("data-link")
if not supervideo_url:
return None
# Init class
video_source = VideoSource(url=supervideo_url)
video_source = VideoSource(supervideo_url)
master_playlist = video_source.get_playlist()
# Define the filename and path for the downloaded film

View File

@ -38,38 +38,52 @@ class GetSerieInfo:
soup = BeautifulSoup(response.text, "html.parser")
self.series_name = soup.find("title").get_text(strip=True).split(" - ")[0]
# Process all seasons
season_items = soup.find_all('div', class_='accordion-item')
for season_idx, season_item in enumerate(season_items, 1):
season_header = season_item.find('div', class_='accordion-header')
if not season_header:
continue
season_name = season_header.get_text(strip=True)
# Find all season dropdowns
seasons_dropdown = soup.find('div', class_='dropdown seasons')
if not seasons_dropdown:
return
# Get all season items
season_items = seasons_dropdown.find_all('span', {'data-season': True})
for season_item in season_items:
season_num = int(season_item['data-season'])
season_name = season_item.get_text(strip=True)
# Create a new season and get a reference to it
# Create a new season
current_season = self.seasons_manager.add_season({
'number': season_idx,
'number': season_num,
'name': season_name
})
# Find episodes for this season
episode_divs = season_item.find_all('div', class_='down-episode')
for ep_idx, ep_div in enumerate(episode_divs, 1):
episode_name_tag = ep_div.find('b')
if not episode_name_tag:
# Find all episodes for this season
episodes_container = soup.find('div', {'class': 'dropdown mirrors', 'data-season': str(season_num)})
if not episodes_container:
continue
# Get all episode mirrors for this season
episode_mirrors = soup.find_all('div', {'class': 'dropdown mirrors',
'data-season': str(season_num)})
for mirror in episode_mirrors:
episode_data = mirror.get('data-episode', '').split('-')
if len(episode_data) != 2:
continue
episode_name = episode_name_tag.get_text(strip=True)
link_tag = ep_div.find('a', string=lambda text: text and "Supervideo" in text)
episode_url = link_tag['href'] if link_tag else None
ep_num = int(episode_data[1])
# Find supervideo link
supervideo_span = mirror.find('span', {'data-id': 'supervideo'})
if not supervideo_span:
continue
episode_url = supervideo_span.get('data-link', '')
# Add episode to the season
if current_season:
current_season.episodes.add({
'number': ep_idx,
'name': episode_name,
'number': ep_num,
'name': f"Episodio {ep_num}",
'url': episode_url
})

View File

@ -24,7 +24,7 @@ from .serie import download_series
# Variable
indice = 1
_useFor = "anime"
_useFor = "Anime"
_priority = 0
_engineDownload = "mp4"
_deprecate = False
@ -109,7 +109,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
bot = get_bot_instance()
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager,len_database)
process_search_result(select_title, selections)
else:

View File

@ -43,40 +43,38 @@ class ScrapeSerieAnime:
def get_count_episodes(self):
"""
Retrieve total number of episodes for the selected media.
This includes partial episodes (like episode 6.5).
Returns:
int: Total episode count
int: Total episode count including partial episodes
"""
try:
response = httpx.get(
url=f"{self.url}/info_api/{self.media_id}/",
headers=self.headers,
timeout=max_timeout
)
response.raise_for_status()
# Parse JSON response and return episode count
return response.json()["episodes_count"]
except Exception as e:
logging.error(f"Error fetching episode count: {e}")
return None
if self.episodes_cache is None:
self._fetch_all_episodes()
if self.episodes_cache:
return len(self.episodes_cache)
return None
def _fetch_all_episodes(self):
"""
Fetch all episodes data at once and cache it
"""
try:
all_episodes = []
count = self.get_count_episodes()
if not count:
return
# Get initial episode count
response = httpx.get(
url=f"{self.url}/info_api/{self.media_id}/",
headers=self.headers,
timeout=max_timeout
)
response.raise_for_status()
initial_count = response.json()["episodes_count"]
# Fetch episodes
all_episodes = []
start_range = 1
while start_range <= count:
end_range = min(start_range + 119, count)
# Fetch episodes in chunks
while start_range <= initial_count:
end_range = min(start_range + 119, initial_count)
response = httpx.get(
url=f"{self.url}/info_api/{self.media_id}/1",

View File

@ -18,8 +18,8 @@ from .film import download_film
# Variable
indice = 8
_useFor = "anime"
indice = 6
_useFor = "Anime"
_priority = 0
_engineDownload = "mp4"
_deprecate = False
@ -75,7 +75,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager,len_database)
process_search_result(select_title, selections)
else:

View File

@ -31,7 +31,8 @@ class ScrapSerie:
self.client = httpx.Client(
cookies={"sessionId": self.session_id},
headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
base_url=full_url
base_url=full_url,
verify=False
)
try:

View File

@ -0,0 +1,72 @@
# 09.06.24
from urllib.parse import quote_plus
# External library
from rich.console import Console
from rich.prompt import Prompt
# Internal utilities
from StreamingCommunity.Api.Template import get_select_title
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Logic class
from .site import title_search, media_search_manager, table_show_manager
from .film import download_film
# Variable
indice = -1
_useFor = "Film"
_priority = 0
_engineDownload = "mp4"
_deprecate = True
msg = Prompt()
console = Console()
def process_search_result(select_title):
"""
Handles the search result and initiates the download for either a film or series.
"""
    # !!! TODO: type handling does not work for series
download_film(select_title)
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None):
"""
Main function of the application for search.
Parameters:
string_to_search (str, optional): String to search for
        get_onlyDatabase (bool, optional): If True, return only the database object
direct_item (dict, optional): Direct item to process (bypass search)
"""
if direct_item:
select_title = MediaItem(**direct_item)
process_search_result(select_title)
return
if string_to_search is None:
string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{site_constant.SITE_NAME}").strip()
# Search on database
len_database = title_search(quote_plus(string_to_search))
## If only the database is needed, return the manager
if get_onlyDatabase:
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager,len_database)
process_search_result(select_title)
else:
# If no results are found, ask again
console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
search()
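For reference, `quote_plus` URL-encodes the query before it reaches `title_search`, turning spaces into `+` and escaping reserved characters:

from urllib.parse import quote_plus

print(quote_plus("one piece"))              # one+piece
print(quote_plus("l'attacco dei giganti"))  # l%27attacco+dei+giganti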

View File

@@ -0,0 +1,62 @@
# 03.07.24
import os
# External library
from rich.console import Console
# Internal utilities
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Lib.Downloader import HLS_Downloader
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Player
from StreamingCommunity.Api.Player.maxstream import VideoSource
# Variable
console = Console()
def download_film(select_title: MediaItem) -> str:
"""
Downloads a film using the provided obj.
Parameters:
- select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
Return:
- str: output path
"""
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
# Setup api manager
video_source = VideoSource(select_title.url)
# Define output path
title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
# Get m3u8 master playlist
master_playlist = video_source.get_playlist()
# Download the film using the m3u8 playlist, and output filename
r_proc = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, title_name)
).start()
if r_proc['error'] is not None:
try: os.remove(r_proc['path'])
except OSError: pass
return r_proc['path']
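The output layout above nests each film in its own folder named after the sanitized title. Assuming `get_sanitize_file` strips filesystem-unsafe characters (the exact rules live in `StreamingCommunity.Util.os`), the path construction behaves roughly like this sketch:

import os

MOVIE_FOLDER = "Movie"  # assumption: site_constant.MOVIE_FOLDER resolves to a folder like this

title_name = "Alien Romulus" + ".mp4"      # after sanitization
mp4_path = os.path.join(MOVIE_FOLDER, title_name.replace(".mp4", ""))
print(os.path.join(mp4_path, title_name))  # Movie/Alien Romulus/Alien Romulus.mp4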

View File

@@ -0,0 +1,78 @@
# 03.07.24
# External libraries
import httpx
from bs4 import BeautifulSoup
from rich.console import Console
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
# Variable
console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
def title_search(query: str) -> int:
"""
Search for titles based on a search query.
Parameters:
- query (str): The query to search for.
Returns:
- int: The number of titles found.
"""
media_search_manager.clear()
table_show_manager.clear()
search_url = f"{site_constant.FULL_URL}/?s={query}"
console.print(f"[cyan]Search url: [yellow]{search_url}")
try:
response = httpx.get(
search_url,
headers={'user-agent': get_userAgent()},
timeout=max_timeout,
follow_redirects=True,
verify=False
)
response.raise_for_status()
except Exception as e:
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
return 0
# Create soup and find table
soup = BeautifulSoup(response.text, "html.parser")
for card in soup.find_all("div", class_=["card", "mp-post", "horizontal"]):
try:
title_tag = card.find("h3", class_="card-title").find("a")
url = title_tag.get("href")
title = title_tag.get_text(strip=True)
title_info = {
'name': title,
'url': url,
'type': 'film'
}
media_search_manager.add_media(title_info)
except Exception as e:
print(f"Error parsing a film entry: {e}")
# Return the number of titles found
return media_search_manager.get_length()
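To make the parsing loop above concrete, here is the card markup shape it expects (reconstructed from the selectors, so treat the HTML as an assumption) and how BeautifulSoup extracts the fields:

from bs4 import BeautifulSoup

# Hypothetical card markup matching the selectors used in title_search
html = """
<div class="card mp-post horizontal">
  <h3 class="card-title"><a href="https://example.org/film/example-film/">Example Film</a></h3>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
card = soup.find("div", class_=["card", "mp-post", "horizontal"])
title_tag = card.find("h3", class_="card-title").find("a")
print(title_tag.get("href"))           # https://example.org/film/example-film/
print(title_tag.get_text(strip=True))  # Example Film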

View File

@@ -20,8 +20,8 @@ from .series import download_series
# Variable
-indice = 5
-_useFor = "serie"
+indice = 4
+_useFor = "Serie"
_priority = 0
_engineDownload = "hls"
_deprecate = False
@@ -75,7 +75,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
-select_title = get_select_title(table_show_manager, media_search_manager)
+select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@@ -19,9 +19,9 @@ from .film import download_film
# Variable
-indice = 8
-_useFor = "film_serie"
-_priority = 1 # NOTE: Site search need the use of tmbd obj
+indice = 5
+_useFor = "Film_&_Serie"
+_priority = 0
_engineDownload = "hls"
_deprecate = False
@@ -84,7 +84,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
-select_title = get_select_title(table_show_manager, media_search_manager)
+select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@@ -1,9 +1,5 @@
# 21.05.24
-import threading
-import queue
# External libraries
import httpx
from rich.console import Console
@@ -13,12 +9,9 @@ from rich.console import Console
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
-from StreamingCommunity.Lib.TMBD.tmdb import tmdb
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+from .util.ScrapeSerie import GetSerieInfo
# Variable
@@ -26,76 +19,33 @@ console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
-MAX_THREADS = 12
-def determine_media_type(title):
+def determine_media_type(item):
"""
-Use TMDB to determine if a title is a movie or TV show.
+Determine if the item is a film or TV series by checking actual seasons count
+using GetSerieInfo.
"""
try:
-# First search as a movie
-movie_results = tmdb._make_request("search/movie", {"query": title})
-movie_count = len(movie_results.get("results", []))
-# Then search as a TV show
-tv_results = tmdb._make_request("search/tv", {"query": title})
-tv_count = len(tv_results.get("results", []))
-# If results found in only one category, use that
-if movie_count > 0 and tv_count == 0:
-return "film"
-elif tv_count > 0 and movie_count == 0:
-return "tv"
-# If both have results, compare popularity
-if movie_count > 0 and tv_count > 0:
-top_movie = movie_results["results"][0]
-top_tv = tv_results["results"][0]
-return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv"
-return "film"
+# Extract program name from path_id
+program_name = None
+if item.get('path_id'):
+parts = item['path_id'].strip('/').split('/')
+if len(parts) >= 2:
+program_name = parts[-1].split('.')[0]
+if not program_name:
+return "film"
+scraper = GetSerieInfo(program_name)
+scraper.collect_info_title()
+return "tv" if scraper.getNumberSeason() > 0 else "film"
except Exception as e:
-console.log(f"Error determining media type with TMDB: {e}")
+console.print(f"[red]Error determining media type: {e}[/red]")
return "film"
-def worker_determine_type(work_queue, result_dict, worker_id):
-"""
-Worker function to process items from queue and determine media types.
-Parameters:
-- work_queue: Queue containing items to process
-- result_dict: Dictionary to store results
-- worker_id: ID of the worker thread
-"""
-while not work_queue.empty():
-try:
-index, item = work_queue.get(block=False)
-title = item.get('titolo', '')
-media_type = determine_media_type(title)
-result_dict[index] = {
-'id': item.get('id', ''),
-'name': title,
-'type': media_type,
-'path_id': item.get('path_id', ''),
-'url': f"https://www.raiplay.it{item.get('url', '')}",
-'image': f"https://www.raiplay.it{item.get('immagine', '')}",
-}
-work_queue.task_done()
-except queue.Empty:
-break
-except Exception as e:
-console.log(f"Worker {worker_id} error: {e}")
-work_queue.task_done()
def title_search(query: str) -> int:
"""
Search for titles based on a search query.
@@ -141,33 +91,15 @@ def title_search(query: str) -> int:
data = response.json().get('agg').get('titoli').get('cards')
data = data[:15] if len(data) > 15 else data
-# Use multithreading to determine media types in parallel
-work_queue = queue.Queue()
-result_dict = {}
-# Add items to the work queue
-for i, item in enumerate(data):
-work_queue.put((i, item))
-# Create and start worker threads
-threads = []
-for i in range(min(MAX_THREADS, len(data))):
-thread = threading.Thread(
-target=worker_determine_type,
-args=(work_queue, result_dict, i),
-daemon=True
-)
-threads.append(thread)
-thread.start()
-# Wait for all threads to complete
-for thread in threads:
-thread.join()
-# Add all results to media manager in correct order
-for i in range(len(data)):
-if i in result_dict:
-media_search_manager.add_media(result_dict[i])
+# Process each item and add to media manager
+for item in data:
+media_search_manager.add_media({
+'id': item.get('id', ''),
+'name': item.get('titolo', ''),
+'type': determine_media_type(item),
+'path_id': item.get('path_id', ''),
+'url': f"https://www.raiplay.it{item.get('url', '')}",
+'image': f"https://www.raiplay.it{item.get('immagine', '')}",
+})
# Return the number of titles found
return media_search_manager.get_length()
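The new `determine_media_type` relies on `path_id` parsing: RaiPlay paths look like `/programmi/<name>.json`, and the program name is the last path segment with its extension dropped. A worked example (the sample path is illustrative):

path_id = "/programmi/ulisseilpiaceredellascoperta.json"  # illustrative RaiPlay-style path
parts = path_id.strip('/').split('/')                     # ['programmi', 'ulisseilpiaceredellascoperta.json']
program_name = parts[-1].split('.')[0] if len(parts) >= 2 else None
print(program_name)                                       # ulisseilpiaceredellascoperta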

View File

@@ -30,28 +30,48 @@ class GetSerieInfo:
try:
program_url = f"{self.base_url}/programmi/{self.program_name}.json"
response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
+# If 404, content is not yet available
+if response.status_code == 404:
+logging.info(f"Content not yet available: {self.program_name}")
+return
response.raise_for_status()
json_data = response.json()
# Look for seasons in the 'blocks' property
-for block in json_data.get('blocks'):
-if block.get('type') == 'RaiPlay Multimedia Block' and block.get('name', '').lower() == 'episodi':
-self.publishing_block_id = block.get('id')
-# Extract seasons from sets array
-for season_set in block.get('sets', []):
-if 'stagione' in season_set.get('name', '').lower():
-self.seasons_manager.add_season({
-'id': season_set.get('id', ''),
-'number': len(self.seasons_manager.seasons) + 1,
-'name': season_set.get('name', ''),
-'path': season_set.get('path_id', ''),
-'episodes_count': season_set.get('episode_size', {}).get('number', 0)
-})
+for block in json_data.get('blocks', []):
+# Check if block is a season block or episodi block
+if block.get('type') == 'RaiPlay Multimedia Block':
+if block.get('name', '').lower() == 'episodi':
+self.publishing_block_id = block.get('id')
+# Extract seasons from sets array
+for season_set in block.get('sets', []):
+if 'stagione' in season_set.get('name', '').lower():
+self._add_season(season_set, block.get('id'))
+elif 'stagione' in block.get('name', '').lower():
+self.publishing_block_id = block.get('id')
+# Extract season directly from block's sets
+for season_set in block.get('sets', []):
+self._add_season(season_set, block.get('id'))
-except Exception as e:
+except httpx.HTTPError as e:
logging.error(f"Error collecting series info: {e}")
+except Exception as e:
+logging.error(f"Unexpected error collecting series info: {e}")
+def _add_season(self, season_set: dict, block_id: str):
+self.seasons_manager.add_season({
+'id': season_set.get('id', ''),
+'number': len(self.seasons_manager.seasons) + 1,
+'name': season_set.get('name', ''),
+'path': season_set.get('path_id', ''),
+'episodes_count': season_set.get('episode_size', {}).get('number', 0)
+})
def collect_info_season(self, number_season: int) -> None:
"""Get episodes for a specific season."""

View File

@@ -12,6 +12,7 @@ from rich.prompt import Prompt
# Internal utilities
from StreamingCommunity.Api.Template import get_select_title
+from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
@@ -25,48 +26,72 @@ from .series import download_series
# Variable
indice = 0
-_useFor = "film_serie"
+_useFor = "Film_&_Serie" # "Movies_&_Series"
_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
+proxy = None
def get_user_input(string_to_search: str = None):
"""
Asks the user to input a search term.
Handles both Telegram bot input and direct input.
+If string_to_search is provided, it's returned directly (after stripping).
"""
-if string_to_search is None:
-if site_constant.TELEGRAM_BOT:
-bot = get_bot_instance()
-string_to_search = bot.ask(
-"key_search",
-f"Enter the search term\nor type 'back' to return to the menu: ",
-None
-)
-if string_to_search == 'back':
-# Restart the script
-subprocess.Popen([sys.executable] + sys.argv)
-sys.exit()
-else:
-string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
-return string_to_search
+if string_to_search is not None:
+return string_to_search.strip()
+if site_constant.TELEGRAM_BOT:
+bot = get_bot_instance()
+user_response = bot.ask(
+"key_search", # Request type
+"Enter the search term\nor type 'back' to return to the menu: ",
+None
+)
+if user_response is None:
+bot.send_message("Timeout: No search term entered.", None)
+return None
+if user_response.lower() == 'back':
+bot.send_message("Returning to the main menu...", None)
+try:
+# Restart the script
+subprocess.Popen([sys.executable] + sys.argv)
+sys.exit()
+except Exception as e:
+bot.send_message(f"Error during restart attempt: {e}", None)
+return None # Return None if restart fails
+return user_response.strip()
+else:
+return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
-def process_search_result(select_title, selections=None):
+def process_search_result(select_title, selections=None, proxy=None):
"""
Handles the search result and initiates the download for either a film or series.
Parameters:
-select_title (MediaItem): The selected media item
+select_title (MediaItem): The selected media item. Can be None if selection fails.
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
-{'season': season_selection, 'episode': episode_selection}
+e.g., {'season': season_selection, 'episode': episode_selection}
+proxy (str, optional): The proxy to use for downloads.
"""
+if not select_title:
+if site_constant.TELEGRAM_BOT:
+bot = get_bot_instance()
+bot.send_message("No title selected or selection cancelled.", None)
+else:
+console.print("[yellow]No title selected or selection cancelled.")
+return
if select_title.type == 'tv':
season_selection = None
episode_selection = None
@@ -75,42 +100,67 @@ def process_search_result(select_title, selections=None):
season_selection = selections.get('season')
episode_selection = selections.get('episode')
-download_series(select_title, season_selection, episode_selection)
+download_series(select_title, season_selection, episode_selection, proxy)
else:
-download_film(select_title)
+download_film(select_title, proxy)
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
"""
Main function of the application for search.
Parameters:
-string_to_search (str, optional): String to search for
-get_onlyDatabase (bool, optional): If True, return only the database object
-direct_item (dict, optional): Direct item to process (bypass search)
+string_to_search (str, optional): String to search for. Can be passed from run.py.
+If 'back', special handling might occur in get_user_input.
+get_onlyDatabase (bool, optional): If True, return only the database search manager object.
+direct_item (dict, optional): Direct item to process (bypasses search).
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
-{'season': season_selection, 'episode': episode_selection}
+for series (season/episode).
"""
+bot = None
+if site_constant.TELEGRAM_BOT:
+bot = get_bot_instance()
+# Check proxy if not already set
+finder = ProxyFinder(site_constant.FULL_URL)
+proxy = finder.find_fast_proxy()
if direct_item:
-select_title = MediaItem(**direct_item)
-process_search_result(select_title, selections)
+select_title_obj = MediaItem(**direct_item)
+process_search_result(select_title_obj, selections, proxy)
return
+actual_search_query = get_user_input(string_to_search)
+# Handle cases where user input is empty, or 'back' was handled (sys.exit or None return)
+if not actual_search_query:
+if bot:
+if actual_search_query is None: # Specifically for timeout from bot.ask or failed restart
+bot.send_message("Search term not provided or operation cancelled. Returning.", None)
+return
-if string_to_search is None:
-string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
-# Search on database
-len_database = title_search(string_to_search)
+# Perform search on the database using the obtained query
+finder = ProxyFinder(site_constant.FULL_URL)
+proxy = finder.find_fast_proxy()
+len_database = title_search(actual_search_query, proxy)
-# If only the database is needed, return the manager
+# If only the database object (media_search_manager populated by title_search) is needed
if get_onlyDatabase:
-return media_search_manager
+return media_search_manager
if len_database > 0:
-select_title = get_select_title(table_show_manager, media_search_manager)
-process_search_result(select_title, selections)
+select_title = get_select_title(table_show_manager, media_search_manager, len_database)
+process_search_result(select_title, selections, proxy)
else:
-# If no results are found, ask again
-console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
-search()
+no_results_message = f"No results found for: '{actual_search_query}'"
+if bot:
+bot.send_message(no_results_message, None)
+else:
+console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
+# Do not call search() recursively here to avoid infinite loops on no results.
+# The flow should return to the caller (e.g., main menu in run.py).
+return
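The net effect of this refactor is that the proxy is resolved once per search and then threaded explicitly through `title_search`, `process_search_result`, and the downloaders, instead of being rediscovered at each step. A simplified wiring sketch with stand-in stubs (none of these bodies are the real implementations):

# Simplified wiring sketch: the proxy is resolved once and passed down explicitly.
def find_fast_proxy():            # stand-in for ProxyFinder(...).find_fast_proxy()
    return None                   # None means "no proxy configured"

def title_search(query, proxy):   # stand-in: would issue httpx.get(..., proxy=proxy)
    return 1

def process_search_result(select_title, selections=None, proxy=None):
    print(f"downloading {select_title!r} via proxy={proxy!r}")

proxy = find_fast_proxy()
if title_search("example", proxy) > 0:
    process_search_result("Example Title", selections=None, proxy=proxy)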

View File

@@ -27,7 +27,7 @@ from StreamingCommunity.Api.Player.vixcloud import VideoSource
console = Console()
-def download_film(select_title: MediaItem) -> str:
+def download_film(select_title: MediaItem, proxy: str = None) -> str:
"""
Downloads a film using the provided film ID, title name, and domain.
@@ -55,13 +55,17 @@ def download_film(select_title: MediaItem) -> str:
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
# Init class
-video_source = VideoSource(site_constant.FULL_URL, False, select_title.id)
+video_source = VideoSource(f"{site_constant.FULL_URL}/it", False, select_title.id, proxy)
# Retrieve scws and if available master playlist
video_source.get_iframe(select_title.id)
video_source.get_content()
master_playlist = video_source.get_playlist()
+if master_playlist is None:
+console.print(f"[red]Site: {site_constant.SITE_NAME}, error: No master playlist found[/red]")
+return None
# Define the filename and path for the downloaded film
title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))

View File

@@ -142,7 +142,7 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, vid
break
-def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
+def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None, proxy = None) -> None:
"""
Handle downloading a complete series.
@@ -154,8 +154,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
start_message()
# Init class
-video_source = VideoSource(site_constant.FULL_URL, True, select_season.id)
-scrape_serie = GetSerieInfo(site_constant.FULL_URL, select_season.id, select_season.slug)
+video_source = VideoSource(f"{site_constant.FULL_URL}/it", True, select_season.id, proxy)
+scrape_serie = GetSerieInfo(f"{site_constant.FULL_URL}/it", select_season.id, select_season.slug, proxy)
# Collect information about season
scrape_serie.getNumberSeason()
@@ -219,4 +219,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# Get script_id
script_id = TelegramSession.get_session()
if script_id != "unknown":
-TelegramSession.deleteScriptId(script_id)
+TelegramSession.deleteScriptId(script_id)

View File

@@ -28,7 +28,7 @@ table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
-def title_search(query: str) -> int:
+def title_search(query: str, proxy: str) -> int:
"""
Search for titles based on a search query.
@@ -46,9 +46,10 @@ def title_search(query: str) -> int:
try:
response = httpx.get(
-site_constant.FULL_URL,
+f"{site_constant.FULL_URL}/it",
headers={'user-agent': get_userAgent()},
-timeout=max_timeout
+timeout=max_timeout,
+proxy=proxy
)
response.raise_for_status()
@@ -56,10 +57,11 @@ def title_search(query: str) -> int:
version = json.loads(soup.find('div', {'id': "app"}).get("data-page"))['version']
except Exception as e:
+if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
console.print(f"[red]Site: {site_constant.SITE_NAME} version, request error: {e}")
return 0
-search_url = f"{site_constant.FULL_URL}/search?q={query}"
+search_url = f"{site_constant.FULL_URL}/it/search?q={query}"
console.print(f"[cyan]Search url: [yellow]{search_url}")
try:
@@ -71,7 +73,8 @@ def title_search(query: str) -> int:
'x-inertia': 'true',
'x-inertia-version': version
},
-timeout=max_timeout
+timeout=max_timeout,
+proxy=proxy
)
response.raise_for_status()
@@ -117,4 +120,4 @@ def title_search(query: str) -> int:
bot.send_message(f"Lista dei risultati:", choices)
# Return the number of titles found
-return media_search_manager.get_length()
+return media_search_manager.get_length()

View File

@@ -20,7 +20,7 @@ max_timeout = config_manager.get_int("REQUESTS", "timeout")
class GetSerieInfo:
-def __init__(self, url, media_id: int = None, series_name: str = None):
+def __init__(self, url, media_id: int = None, series_name: str = None, proxy = None):
"""
Initialize the GetSerieInfo class for scraping TV series information.
@@ -32,6 +32,7 @@ class GetSerieInfo:
self.is_series = False
self.headers = {'user-agent': get_userAgent()}
self.url = url
+self.proxy = proxy
self.media_id = media_id
self.seasons_manager = SeasonManager()
@@ -50,7 +51,8 @@ class GetSerieInfo:
response = httpx.get(
url=f"{self.url}/titles/{self.media_id}-{self.series_name}",
headers=self.headers,
-timeout=max_timeout
+timeout=max_timeout,
+proxy=self.proxy
)
response.raise_for_status()
@@ -104,7 +106,8 @@ class GetSerieInfo:
'x-inertia': 'true',
'x-inertia-version': self.version,
},
-timeout=max_timeout
+timeout=max_timeout,
+proxy=self.proxy
)
)
# Extract episodes from JSON response

View File

@@ -19,14 +19,15 @@ from .series import download_series
# Variable
-indice = 8
-_useFor = "film_serie"
-_priority = 10 # !!! MOLTO LENTO
+indice = 7
+_useFor = "Film_&_Serie"
+_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
+proxy = None
def get_user_input(string_to_search: str = None):
@@ -74,20 +75,25 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
select_title = MediaItem(**direct_item)
process_search_result(select_title, selections) # DONT SUPPORT PROXY FOR NOW
return
+# Check proxy if not already set
+finder = ProxyFinder(site_constant.FULL_URL)
+proxy = finder.find_fast_proxy()
if string_to_search is None:
string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
# Perform search on the database using the obtained query
-finder = ProxyFinder(url=f"{site_constant.FULL_URL}/serie/euphoria/")
-proxy, response_serie, _ = finder.find_fast_proxy()
-len_database = title_search(string_to_search, [proxy, response_serie])
+proxy = finder.find_fast_proxy()
+len_database = title_search(string_to_search, proxy)
# If only the database is needed, return the manager
if get_onlyDatabase:
return media_search_manager
if len_database > 0:
-select_title = get_select_title(table_show_manager, media_search_manager)
+select_title = get_select_title(table_show_manager, media_search_manager, len_database)
+process_search_result(select_title, selections, proxy)
else:

View File

@@ -27,9 +27,16 @@ table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
-def extract_nonce(response_) -> str:
+def extract_nonce(proxy) -> str:
"""Extract nonce value from the page script"""
-soup = BeautifulSoup(response_.content, 'html.parser')
+response = httpx.get(
+site_constant.FULL_URL,
+headers={'user-agent': get_userAgent()},
+timeout=max_timeout,
+proxy=proxy
+)
+soup = BeautifulSoup(response.content, 'html.parser')
script = soup.find('script', id='live-search-js-extra')
if script:
match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script.text)
@@ -38,7 +45,7 @@ def extract_nonce(response_) -> str:
return ""
-def title_search(query: str, additionalData: list) -> int:
+def title_search(query: str, proxy: str) -> int:
"""
Search for titles based on a search query.
@@ -51,12 +58,11 @@ def title_search(query: str, additionalData: list) -> int:
media_search_manager.clear()
table_show_manager.clear()
-proxy, response_serie = additionalData
search_url = f"{site_constant.FULL_URL}/wp-admin/admin-ajax.php"
console.print(f"[cyan]Search url: [yellow]{search_url}")
try:
-_wpnonce = extract_nonce(response_serie)
+_wpnonce = extract_nonce(proxy)
if not _wpnonce:
console.print("[red]Error: Failed to extract nonce")
@@ -82,6 +88,7 @@ def title_search(query: str, additionalData: list) -> int:
soup = BeautifulSoup(response.text, 'html.parser')
except Exception as e:
+if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
return 0
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
return 0
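The nonce lives in an inline `live-search-js-extra` script as a JSON-ish blob, and the regex pulls the `admin_ajax_nonce` value out of it. A self-contained check with a made-up script body and nonce value:

import re

# Made-up script body in the shape the regex expects
script_text = 'var live_search_params = {"ajax_url":"/wp-admin/admin-ajax.php","admin_ajax_nonce":"a1b2c3d4e5"};'
match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script_text)
print(match.group(1) if match else "")  # a1b2c3d4e5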

View File

@@ -7,78 +7,123 @@ import sys
from rich.console import Console
# Internal utilities
from StreamingCommunity.Api.Template.config_loader import site_constant
+from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
# Variable
console = Console()
+available_colors = ['red', 'magenta', 'yellow', 'cyan', 'green', 'blue', 'white']
column_to_hide = ['Slug', 'Sub_ita', 'Last_air_date', 'Seasons_count', 'Url', 'Image', 'Path_id']
-def get_select_title(table_show_manager, media_search_manager):
+def get_select_title(table_show_manager, media_search_manager, num_results_available):
"""
Display a selection of titles and prompt the user to choose one.
+Handles both console and Telegram bot input.
Parameters:
+table_show_manager: Manager for console table display.
+media_search_manager: Manager holding the list of media items.
+num_results_available (int): The number of media items available for selection.
Returns:
-MediaItem: The selected media item.
+MediaItem: The selected media item, or None if no selection is made or an error occurs.
"""
-# Determine column_info dynamically for (search site)
if not media_search_manager.media_list:
-console.print("\n[red]No media items available.")
+# console.print("\n[red]No media items available.")
return None
-# Example of available colors for columns
-available_colors = ['red', 'magenta', 'yellow', 'cyan', 'green', 'blue', 'white']
-# Retrieve the keys of the first media item as column headers
-first_media_item = media_search_manager.media_list[0]
-column_info = {"Index": {'color': available_colors[0]}} # Always include Index with a fixed color
-# Assign colors to the remaining keys dynamically
-color_index = 1
-for key in first_media_item.__dict__.keys():
-if key.capitalize() in column_to_hide:
-continue
-if key in ('id', 'type', 'name', 'score'): # Custom prioritization of colors
-if key == 'type':
-column_info["Type"] = {'color': 'yellow'}
-elif key == 'name':
-column_info["Name"] = {'color': 'magenta'}
-elif key == 'score':
-column_info["Score"] = {'color': 'cyan'}
-else:
-column_info[key.capitalize()] = {'color': available_colors[color_index % len(available_colors)]}
-color_index += 1
-table_show_manager.add_column(column_info)
-# Populate the table with title information
-for i, media in enumerate(media_search_manager.media_list):
-media_dict = {'Index': str(i)}
-# Ensure all values are strings for rich add table
-media_dict[key.capitalize()] = str(getattr(media, key))
-table_show_manager.add_tv_show(media_dict)
-# Run the table and handle user input
-last_command = table_show_manager.run(force_int_input=True, max_int_input=len(media_search_manager.media_list))
-table_show_manager.clear()
-# Handle user's quit command
-if last_command == "q" or last_command == "quit":
-console.print("\n[red]Quit ...")
-sys.exit(0)
-# Check if the selected index is within range
-if 0 <= int(last_command) < len(media_search_manager.media_list):
-return media_search_manager.get(int(last_command))
-else:
-console.print("\n[red]Wrong index")
-sys.exit(0)
+if site_constant.TELEGRAM_BOT:
+bot = get_bot_instance()
+prompt_message = f"Inserisci il numero del titolo che vuoi selezionare (da 0 a {num_results_available - 1}):"
+user_input_str = bot.ask(
+"select_title_from_list_number",
+prompt_message,
+None
+)
+if user_input_str is None:
+bot.send_message("Timeout: nessuna selezione ricevuta.", None)
+return None
+try:
+chosen_index = int(user_input_str)
+if 0 <= chosen_index < num_results_available:
+selected_item = media_search_manager.get(chosen_index)
+if selected_item:
+return selected_item
+else:
+bot.send_message(f"Errore interno: Impossibile recuperare il titolo con indice {chosen_index}.", None)
+return None
+else:
+bot.send_message(f"Selezione '{chosen_index}' non valida. Inserisci un numero compreso tra 0 e {num_results_available - 1}.", None)
+return None
+except ValueError:
+bot.send_message(f"Input '{user_input_str}' non valido. Devi inserire un numero.", None)
+return None
+except Exception as e:
+bot.send_message(f"Si è verificato un errore durante la selezione: {e}", None)
+return None
+else:
+# Original console logic
+if not media_search_manager.media_list:
+console.print("\n[red]No media items available.")
+return None
+first_media_item = media_search_manager.media_list[0]
+column_info = {"Index": {'color': available_colors[0]}}
+color_index = 1
+for key in first_media_item.__dict__.keys():
+if key.capitalize() in column_to_hide:
+continue
+if key in ('id', 'type', 'name', 'score'):
+if key == 'type': column_info["Type"] = {'color': 'yellow'}
+elif key == 'name': column_info["Name"] = {'color': 'magenta'}
+elif key == 'score': column_info["Score"] = {'color': 'cyan'}
+else:
+column_info[key.capitalize()] = {'color': available_colors[color_index % len(available_colors)]}
+color_index += 1
+table_show_manager.clear()
+table_show_manager.add_column(column_info)
+for i, media in enumerate(media_search_manager.media_list):
+media_dict = {'Index': str(i)}
+for key in first_media_item.__dict__.keys():
+if key.capitalize() in column_to_hide:
+continue
+media_dict[key.capitalize()] = str(getattr(media, key))
+table_show_manager.add_tv_show(media_dict)
+last_command_str = table_show_manager.run(force_int_input=True, max_int_input=len(media_search_manager.media_list))
+table_show_manager.clear()
+if last_command_str is None or last_command_str.lower() in ["q", "quit"]:
+console.print("\n[red]Selezione annullata o uscita.")
+return None
+try:
+selected_index = int(last_command_str)
+if 0 <= selected_index < len(media_search_manager.media_list):
+return media_search_manager.get(selected_index)
+else:
+console.print("\n[red]Indice errato o non valido.")
+# sys.exit(0)
+return None
+except ValueError:
+console.print("\n[red]Input non numerico ricevuto dalla tabella.")
+# sys.exit(0)
+return None

View File

@@ -1,20 +1,15 @@
# 29.04.25
import os
import sys
import time
-import json
-import signal
-import warnings
-warnings.filterwarnings("ignore", category=UserWarning)
-from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, as_completed
# External library
import httpx
from rich import print
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeRemainingColumn
# Internal utilities
@@ -27,118 +22,18 @@ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
class ProxyFinder:
-def __init__(self, url, timeout_threshold: float = 7.0, max_proxies: int = 150, max_workers: int = 12):
+def __init__(self, url, timeout_threshold: float = 7.0):
self.url = url
self.timeout_threshold = timeout_threshold
-self.max_proxies = max_proxies
-self.max_workers = max_workers
self.found_proxy = None
self.shutdown_flag = False
-self.json_file = os.path.join(os.path.dirname(__file__), 'working_proxies.json')
-signal.signal(signal.SIGINT, self._handle_interrupt)
-def load_saved_proxies(self) -> tuple:
-"""Load saved proxies if they're not expired (2 hours old)"""
-try:
-if not os.path.exists(self.json_file):
-return None, None
-with open(self.json_file, 'r') as f:
-data = json.load(f)
-if not data.get('proxies') or not data.get('last_update'):
-return None, None
-last_update = datetime.fromisoformat(data['last_update'])
-if datetime.now() - last_update > timedelta(hours=2):
-return None, None
-return data['proxies'], last_update
-except Exception:
-return None, None
-def save_working_proxy(self, proxy: str, response_time: float):
-"""Save working proxy to JSON file"""
-data = {
-'proxies': [{'proxy': proxy, 'response_time': response_time}],
-'last_update': datetime.now().isoformat()
-}
-try:
-with open(self.json_file, 'w') as f:
-json.dump(data, f, indent=4)
-except Exception as e:
-print(f"[bold red]Error saving proxy:[/bold red] {str(e)}")
-def fetch_geonode(self) -> list:
-proxies = []
-try:
-response = httpx.get(
-"https://proxylist.geonode.com/api/proxy-list?protocols=http%2Chttps&limit=100&page=1&sort_by=speed&sort_type=asc",
-headers=get_headers(),
-timeout=MAX_TIMEOUT
-)
-data = response.json()
-proxies = [(f"http://{p['ip']}:{p['port']}", "Geonode") for p in data.get('data', [])]
-except Exception as e:
-print(f"[bold red]Error in Geonode:[/bold red] {str(e)[:100]}")
-return proxies
-def fetch_proxyscrape(self) -> list:
-proxies = []
-try:
-response = httpx.get(
-"https://api.proxyscrape.com/v4/free-proxy-list/get?request=get_proxies&protocol=http&skip=0&proxy_format=protocolipport&format=json&limit=100&timeout=1000",
-headers=get_headers(),
-timeout=MAX_TIMEOUT
-)
-data = response.json()
-if 'proxies' in data and isinstance(data['proxies'], list):
-proxies = [(proxy_data['proxy'], "ProxyScrape") for proxy_data in data['proxies'] if 'proxy' in proxy_data]
-except Exception as e:
-print(f"[bold red]Error in ProxyScrape:[/bold red] {str(e)[:100]}")
-return proxies
-def fetch_proxies_from_sources(self) -> list:
-#print("[cyan]Fetching proxies from sources...[/cyan]")
-with ThreadPoolExecutor(max_workers=3) as executor:
-proxyscrape_future = executor.submit(self.fetch_proxyscrape)
-geonode_future = executor.submit(self.fetch_geonode)
-sources_proxies = {}
-try:
-proxyscrape_result = proxyscrape_future.result()
-sources_proxies["proxyscrape"] = proxyscrape_result[:int(self.max_proxies/2)]
-except Exception as e:
-print(f"[bold red]Error fetching from proxyscrape:[/bold red] {str(e)[:100]}")
-sources_proxies["proxyscrape"] = []
-try:
-geonode_result = geonode_future.result()
-sources_proxies["geonode"] = geonode_result[:int(self.max_proxies/2)]
-except Exception as e:
-print(f"[bold red]Error fetching from geonode:[/bold red] {str(e)[:100]}")
-sources_proxies["geonode"] = []
-merged_proxies = []
-if "proxyscrape" in sources_proxies:
-merged_proxies.extend(sources_proxies["proxyscrape"])
-if "geonode" in sources_proxies:
-merged_proxies.extend(sources_proxies["geonode"])
-proxy_list = merged_proxies[:self.max_proxies]
-return proxy_list
def _test_single_request(self, proxy_info: tuple) -> tuple:
proxy, source = proxy_info
try:
start = time.time()
print(f"[yellow]Testing proxy for URL: {self.url}...")
with httpx.Client(proxy=proxy, timeout=self.timeout_threshold) as client:
response = client.get(self.url, headers=get_headers())
if response.status_code == 200:
@@ -161,72 +56,17 @@ class ProxyFinder:
return (proxy, success2 and time2 <= self.timeout_threshold, avg_time, text1, source)
def _handle_interrupt(self, sig, frame):
-print("\n[bold yellow]Received keyboard interrupt. Terminating...[/bold yellow]")
+print("\n[red]Received keyboard interrupt. Terminating...")
self.shutdown_flag = True
+sys.exit(0)
-def find_fast_proxy(self) -> tuple:
-saved_proxies, last_update = self.load_saved_proxies()
-if saved_proxies:
-print("[cyan]Testing saved proxy...[/cyan]")
-for proxy_data in saved_proxies:
-result = self.test_proxy((proxy_data['proxy'], 'cached'))
-if result[1]:
-return proxy_data['proxy'], result[3], result[2]
-else:
-print(f"[red]Saved proxy {proxy_data['proxy']} failed - response time: {result[2]:.2f}s[/red]")
-proxies = self.fetch_proxies_from_sources()
-if not proxies:
-print("[bold red]No proxies fetched to test.[/bold red]")
-return (None, None, None)
-found_proxy = None
-response_text = None
-source = None
-failed_count = 0
-success_count = 0
-#print(f"[cyan]Testing {len(proxies)} proxies...[/cyan]")
-with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
-futures = {executor.submit(self.test_proxy, p): p for p in proxies}
-with Progress(
-SpinnerColumn(),
-TextColumn("[progress.description]{task.description}"),
-BarColumn(),
-TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
-TextColumn("[cyan]{task.fields[success]}[/cyan]/[red]{task.fields[failed]}[/red]"),
-TimeRemainingColumn(),
-) as progress:
-task = progress.add_task(
-"[cyan]Testing Proxies",
-total=len(futures),
-success=success_count,
-failed=failed_count
-)
-for future in as_completed(futures):
-if self.shutdown_flag:
-break
-try:
-proxy, success, elapsed, response, proxy_source = future.result()
-if success:
-success_count += 1
-print(f"[bold green]Found valid proxy:[/bold green] {proxy} ({elapsed:.2f}s)")
-found_proxy = proxy
-response_text = response
-self.save_working_proxy(proxy, elapsed)
-self.shutdown_flag = True
-break
-else:
-failed_count += 1
-except Exception:
-failed_count += 1
-progress.update(task, advance=1, success=success_count, failed=failed_count)
-if not found_proxy:
-print("[bold red]No working proxies found[/bold red]")
-return (found_proxy, response_text, source)
+def find_fast_proxy(self) -> str:
+try:
+proxy_config = config_manager.get("REQUESTS", "proxy")
+if proxy_config and isinstance(proxy_config, dict) and 'http' in proxy_config:
+print("[cyan]Using configured proxy from config.json...[/cyan]")
+return proxy_config['http']
+except Exception as e:
+print(f"[red]Error getting configured proxy: {str(e)}[/red]")
+return None
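With this rewrite, `find_fast_proxy` no longer scrapes free-proxy lists; it only honors a proxy set in `config.json`, expecting `REQUESTS.proxy` to be a dict with an `http` key (note the sample config later in this diff still ships `"proxy": ""`, which simply yields no proxy). A sketch of the lookup logic against an in-memory value; the proxy URL is illustrative:

# Stand-in for config_manager.get("REQUESTS", "proxy"); the dict shape is what the code checks for.
proxy_config = {"http": "http://127.0.0.1:8080"}  # illustrative value

def find_fast_proxy():
    if proxy_config and isinstance(proxy_config, dict) and 'http' in proxy_config:
        return proxy_config['http']
    return None

print(find_fast_proxy())  # http://127.0.0.1:8080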

View File

@@ -0,0 +1,62 @@
{
"DEFAULT": {
"debug": false,
"show_message": true,
"clean_console": true,
"show_trending": true,
"use_api": true,
"not_close": false,
"telegram_bot": true,
"download_site_data": true,
"validate_github_config": true
},
"OUT_FOLDER": {
"root_path": "/mnt/data/media/",
"movie_folder_name": "films",
"serie_folder_name": "serie_tv",
"anime_folder_name": "Anime",
"map_episode_name": "E%(episode)_%(episode_name)",
"add_siteName": false
},
"QBIT_CONFIG": {
"host": "192.168.1.51",
"port": "6666",
"user": "admin",
"pass": "adminadmin"
},
"M3U8_DOWNLOAD": {
"tqdm_delay": 0.01,
"default_video_workser": 12,
"default_audio_workser": 12,
"segment_timeout": 8,
"download_audio": true,
"merge_audio": true,
"specific_list_audio": [
"ita"
],
"download_subtitle": true,
"merge_subs": true,
"specific_list_subtitles": [
"ita",
"eng"
],
"cleanup_tmp_folder": true
},
"M3U8_CONVERSION": {
"use_codec": false,
"use_vcodec": true,
"use_acodec": true,
"use_bitrate": true,
"use_gpu": false,
"default_preset": "ultrafast"
},
"M3U8_PARSER": {
"force_resolution": "Best",
"get_only_link": false
},
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8
}
}

View File

@@ -575,6 +575,10 @@ class TelegramBot:
cleaned_output = cleaned_output.replace(
"\n\n", "\n"
) # Remove multiple newlines
+# Initialize the variables
+cleaned_output_0 = None # or ""
+cleaned_output_1 = None # or ""
# cleaned_output is a string; recover whatever sits between ## and ##
download_section = re.search(r"##(.*?)##", cleaned_output, re.DOTALL)

View File

@@ -4,6 +4,7 @@ import os
import sys
import time
import asyncio
+import importlib.metadata
# External library
import httpx
@@ -11,7 +12,7 @@ from rich.console import Console
# Internal utilities
-from .version import __version__, __author__, __title__
+from .version import __version__ as source_code_version, __author__, __title__
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
@@ -75,7 +76,11 @@ def update():
percentual_stars = 0
# Get the current version (installed version)
-current_version = __version__
+try:
+current_version = importlib.metadata.version(__title__)
+except importlib.metadata.PackageNotFoundError:
+#console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]")
+current_version = source_code_version
# Get commit details
latest_commit = response_commits[0] if response_commits else None

View File

@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
-__version__ = '3.0.5'
+__version__ = '3.0.9'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
-__copyright__ = 'Copyright 2024'
+__copyright__ = 'Copyright 2025'

View File

@@ -36,8 +36,10 @@ class ConfigManager:
base_path = os.path.dirname(sys.executable)
else:
-# Use the current directory where the script is executed
-base_path = os.getcwd()
+# Get the actual path of the module file
+current_file_path = os.path.abspath(__file__)
+base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
# Initialize file paths
self.file_path = os.path.join(base_path, file_name)
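The three nested `dirname` calls climb from this module file up to the project root, so the config path no longer depends on the working directory. For a module at `<root>/StreamingCommunity/Util/config_json.py` (the exact location is inferred from the import paths elsewhere in this diff):

import os

# Illustrative module path; three dirname() calls walk up to the project root.
current_file_path = "/opt/app/StreamingCommunity/Util/config_json.py"
print(os.path.dirname(current_file_path))                                    # /opt/app/StreamingCommunity/Util
print(os.path.dirname(os.path.dirname(current_file_path)))                   # /opt/app/StreamingCommunity
print(os.path.dirname(os.path.dirname(os.path.dirname(current_file_path))))  # /opt/app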
@@ -266,33 +268,32 @@ class ConfigManager:
self._load_site_data_from_file()
def _load_site_data_from_api(self) -> None:
-"""Load site data from API."""
+"""Load site data from GitHub."""
+domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
headers = {
-"apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
-"Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
-"Content-Type": "application/json",
-"User-Agent": get_userAgent()
+"User-Agent": get_userAgent()
}
try:
-console.print("[bold cyan]Retrieving site data from API...[/bold cyan]")
-response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers)
+console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
+response = requests.get(domains_github_url, timeout=8, headers=headers)
if response.ok:
-data = response.json()
-if data and len(data) > 0:
-self.configSite = data[0]['data']
-site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
-else:
-console.print("[bold yellow]API returned an empty data set[/bold yellow]")
+self.configSite = response.json()
+site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")
else:
-console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
+console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
self._handle_site_data_fallback()
+except json.JSONDecodeError as e:
+console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}")
+self._handle_site_data_fallback()
except Exception as e:
-console.print(f"[bold red]API connection error:[/bold red] {str(e)}")
+console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
self._handle_site_data_fallback()
self._handle_site_data_fallback()
def _load_site_data_from_file(self) -> None:
@@ -557,7 +558,6 @@ class ConfigManager:
return section in config_source
-# Helper function to check the platform
def get_use_large_bar():
"""
Determine if the large bar feature should be enabled.

View File

@@ -12,7 +12,7 @@ import inspect
import subprocess
import contextlib
import importlib.metadata
+import socket
# External library
from unidecode import unidecode
@@ -283,43 +283,61 @@ class InternManager():
else:
return f"{bytes / (1024 * 1024):.2f} MB/s"
-def check_dns_provider(self):
+# def check_dns_provider(self):
+# """
+# Check if the system's current DNS server matches any known DNS providers.
+# Returns:
+# bool: True if the current DNS server matches a known provider,
+# False if no match is found or in case of errors
+# """
+# dns_providers = {
+# "Cloudflare": ["1.1.1.1", "1.0.0.1"],
+# "Google": ["8.8.8.8", "8.8.4.4"],
+# "OpenDNS": ["208.67.222.222", "208.67.220.220"],
+# "Quad9": ["9.9.9.9", "149.112.112.112"],
+# "AdGuard": ["94.140.14.14", "94.140.15.15"],
+# "Comodo": ["8.26.56.26", "8.20.247.20"],
+# "Level3": ["209.244.0.3", "209.244.0.4"],
+# "Norton": ["199.85.126.10", "199.85.127.10"],
+# "CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
+# "Yandex": ["77.88.8.8", "77.88.8.1"]
+# }
+# try:
+# resolver = dns.resolver.Resolver()
+# nameservers = resolver.nameservers
+# if not nameservers:
+# return False
+# for server in nameservers:
+# for provider, ips in dns_providers.items():
+# if server in ips:
+# return True
+# return False
+# except Exception:
+# return False
+def check_dns_resolve(self):
"""
-Check if the system's current DNS server matches any known DNS providers.
+Check if the system's current DNS server can resolve a domain name.
+Works on both Windows and Unix-like systems.
Returns:
-bool: True if the current DNS server matches a known provider,
-False if no match is found or in case of errors
+bool: True if the current DNS server can resolve a domain name,
+False if can't resolve or in case of errors
"""
-dns_providers = {
-"Cloudflare": ["1.1.1.1", "1.0.0.1"],
-"Google": ["8.8.8.8", "8.8.4.4"],
-"OpenDNS": ["208.67.222.222", "208.67.220.220"],
-"Quad9": ["9.9.9.9", "149.112.112.112"],
-"AdGuard": ["94.140.14.14", "94.140.15.15"],
-"Comodo": ["8.26.56.26", "8.20.247.20"],
-"Level3": ["209.244.0.3", "209.244.0.4"],
-"Norton": ["199.85.126.10", "199.85.127.10"],
-"CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
-"Yandex": ["77.88.8.8", "77.88.8.1"]
-}
+test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]
try:
-resolver = dns.resolver.Resolver()
-nameservers = resolver.nameservers
-if not nameservers:
-return False
-for server in nameservers:
-for provider, ips in dns_providers.items():
-if server in ips:
-return True
+for domain in test_domains:
+# socket.gethostbyname() works consistently across all platforms
+socket.gethostbyname(domain)
+return True
+except (socket.gaierror, socket.error):
+return False
except Exception:
return False
class OsSummary:
def __init__(self):

View File

@@ -193,6 +193,13 @@ def force_exit():
def main(script_id = 0):
+color_map = {
+"anime": "red",
+"film_&_serie": "yellow",
+"serie": "blue",
+"torrent": "white"
+}
if TELEGRAM_BOT:
bot = get_bot_instance()
bot.send_message(f"Avviato script {script_id}", None)
@@ -203,7 +210,19 @@ def main(script_id = 0):
log_not = Logger()
initialize()
-if not internet_manager.check_dns_provider():
+# if not internet_manager.check_dns_provider():
+# print()
+# console.print("[red]❌ ERROR: DNS configuration is required!")
+# console.print("[red]The program cannot function correctly without proper DNS settings.")
+# console.print("[yellow]Please configure one of these DNS servers:")
+# console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
+# console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
+# console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
+# time.sleep(2)
+# msg.ask("[yellow]Press Enter to continue ...")
+if not internet_manager.check_dns_resolve():
print()
console.print("[red]❌ ERROR: DNS configuration is required!")
console.print("[red]The program cannot function correctly without proper DNS settings.")
@@ -212,8 +231,7 @@ def main(script_id = 0):
console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
time.sleep(1)
-msg.ask("[yellow]Press Enter to exit...")
os._exit(0)
# Load search functions
search_functions = load_search_functions()
@@ -256,18 +274,6 @@ def main(script_id = 0):
)
# Add arguments for search functions
-color_map = {
-"anime": "red",
-"film_serie": "yellow",
-"film": "blue",
-"serie": "green",
-"other": "white"
-}
# Add numeric arguments for each search module
for idx, (alias, (_, use_for)) in enumerate(search_functions.items()):
parser.add_argument(f'--{idx}', action='store_true', help=f'Search using {alias.split("_")[0]} ({use_for})')
parser.add_argument('-s', '--search', default=None, help='Search terms')
# Parse command-line arguments
@@ -302,44 +308,41 @@ def main(script_id = 0):
global_search(search_terms)
return
-# Check for numeric arguments
-search_functions_list = list(search_functions.items())
-for i in range(len(search_functions_list)):
-if getattr(args, str(i)):
-alias, (func, _) = search_functions_list[i]
-run_function(func, search_terms=search_terms)
-return
+# Create mappings using module indice
+input_to_function = {}
+choice_labels = {}
+for alias, (func, use_for) in search_functions.items():
+module_name = alias.split("_")[0]
+try:
+mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
+site_index = str(getattr(mod, 'indice'))
+input_to_function[site_index] = func
+choice_labels[site_index] = (module_name.capitalize(), use_for.lower())
+except Exception as e:
+console.print(f"[red]Error mapping module {module_name}: {str(e)}")
-# Mapping user input to functions
-input_to_function = {str(i): func for i, (alias, (func, _)) in enumerate(search_functions.items())}
-# Create dynamic prompt message and choices
-choice_labels = {str(i): (alias.split("_")[0].capitalize(), use_for) for i, (alias, (_, use_for)) in enumerate(search_functions.items())}
-# Display the category legend in a single line
+# Display the category legend
legend_text = " | ".join([f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items()])
console.print(f"\n[bold green]Category Legend:[/bold green] {legend_text}")
-# Construct the prompt message with color-coded site names and aliases
+# Construct prompt with proper color mapping
prompt_message = "[green]Insert category [white](" + ", ".join(
-[f"{key}: [{color_map.get(label[1], 'white')}]{label[0]}"
+[f"[{color_map.get(label[1], 'white')}]{key}: {label[0]}[/{color_map.get(label[1], 'white')}]"
for key, label in choice_labels.items()]
) + "[white])"
if TELEGRAM_BOT:
# Display the category legend in a single line
category_legend_str = "Categorie: \n" + " | ".join([
f"{category.capitalize()}" for category in color_map.keys()
])
# Build message with aliases
prompt_message = "Inserisci il sito:\n" + "\n".join(
[f"{key}: {label[0]}" for key, label in choice_labels.items()]
)
console.print(f"\n{prompt_message}")
# Ask the user for the selection via the Telegram bot
category = bot.ask(
"select_provider",
f"{category_legend_str}\n\n{prompt_message}",
@@ -351,13 +354,6 @@ def main(script_id = 0):
# Run the corresponding function based on user input
if category in input_to_function:
-"""if category == global_search_key:
-# Run global search
-run_function(input_to_function[category], search_terms=search_terms)
-else:"""
-# Run normal site-specific search
run_function(input_to_function[category], search_terms=search_terms)
else:

View File

@@ -57,6 +57,7 @@
"REQUESTS": {
"verify": false,
"timeout": 20,
-"max_retry": 8
+"max_retry": 8,
+"proxy": ""
}
}

View File

@@ -6,6 +6,7 @@ m3u8
certifi
psutil
unidecode
+curl_cffi
dnspython
jsbeautifier
pathvalidate
@@ -13,3 +14,4 @@ pycryptodomex
ua-generator
qbittorrent-api
pyTelegramBotAPI
+beautifulsoup4

View File

@@ -1,4 +1,5 @@
import os
+import re
from setuptools import setup, find_packages
def read_readme():
@@ -8,9 +9,21 @@ def read_readme():
with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f:
required_packages = f.read().splitlines()
+def get_version():
+try:
+import pkg_resources
+return pkg_resources.get_distribution('StreamingCommunity').version
+except Exception:
+version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py")
+with open(version_file_path, "r", encoding="utf-8") as f:
+version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
+if version_match:
+return version_match.group(1)
+raise RuntimeError("Unable to find version string in StreamingCommunity/Upload/version.py.")
setup(
name="StreamingCommunity",
-version="3.0.5",
+version=get_version(),
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Lovi-0",
@@ -29,4 +42,4 @@ setup(
"Bug Reports": "https://github.com/Lovi-0/StreamingCommunity/issues",
"Source": "https://github.com/Lovi-0/StreamingCommunity",
}
)
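The fallback branch of `get_version` parses `version.py` with a regex rather than importing it. Against the version file shown earlier in this diff it resolves as follows:

import re

version_py = "__title__ = 'StreamingCommunity'\n__version__ = '3.0.9'\n__author__ = 'Arrowar'\n"
match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", version_py, re.M)
print(match.group(1))  # 3.0.9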
)