Compare commits

...

27 Commits
null ... main

Author SHA1 Message Date
None
f4529e5f05 Update schedule 2025-06-03 17:30:27 +02:00
github-actions[bot]
dcfd22bc2b Automatic domain update [skip ci] 2025-06-03 15:27:02 +00:00
Lovi
3cbabfb98b core: Fix requirements 2025-06-02 18:14:36 +02:00
None
6efeb96201 Update update_domain.yml 2025-06-02 12:58:38 +02:00
Lovi
d0207b3669 Fix wrong version pip 2025-06-02 11:08:46 +02:00
Lovi
6713de4ecc Bump v3.0.9 2025-06-01 16:31:24 +02:00
github-actions[bot]
b8e28a30c0 Automatic domain update [skip ci] 2025-06-01 01:02:20 +00:00
Alessandro Perazzetta
a45fd0d37e Dns check (#332)
* refactor: streamline proxy checking in search function

* refactor: update DNS check method, try a real dns resolution instead of checking dns provider

* refactor: enhance DNS resolution check to support multiple domains across platforms

* refactor: replace os.socket with socket for DNS resolution consistency

---------

Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
2025-05-31 20:07:30 +02:00
github-actions[bot]
4b40b8ce22 Automatic domain update [skip ci] 2025-05-31 12:17:33 +00:00
Alessandro Perazzetta
73cc2662b8 Dns check refactor (#328)
* refactor: streamline proxy checking in search function

* refactor: update DNS check method, try a real dns resolution instead of checking dns provider

* refactor: enhance DNS resolution check to support multiple domains across platforms

* refactor: replace os.socket with socket for DNS resolution consistency

---------

Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
2025-05-31 11:30:59 +02:00
Lovi
1776538c6c github: Update domains 2025-05-31 11:28:38 +02:00
None
884bcf656c Create update_domain.yml 2025-05-31 10:59:11 +02:00
Lovi
71e97c2c65 Site: Update endpoint 2025-05-31 10:58:12 +02:00
Lovi
ded66f446e Remove database of domain 2025-05-31 10:52:16 +02:00
Lovi
86c7293779 Bump v3.0.8 2025-05-25 16:59:29 +02:00
Lovi
ef6c8c9cb3 api: Fix tipo raiplay 2025-05-25 15:37:53 +02:00
Alessandro Perazzetta
c01945fdbc refactor: streamline proxy checking in search function (#326) 2025-05-22 08:36:44 +02:00
Lovi
4f0c58f14d api: fix actual_search_query 2025-05-18 16:31:15 +02:00
Lovi
b3db6aa8c1 Bump v3.0.7 2025-05-18 14:36:55 +02:00
None
1c89398054 Fix telegram and proxy (#322)
* Add ENABLE_VIDEO

* Fix proxy

* Add error proxy

* Update config.json

* Fix telegram_bot (#312)

* Update config.json

* Fix telegram_bot

* fix bug

* Fix StreamingCommunity site

* Delete console.log

* fix doppio string_to_search

* Update __init__.py

* Update site.py

* Update config.json

* Update site.py

* Update config.json

* Update __init__.py

* Update __init__.py

* Fix proxy (#319)

* Add ENABLE_VIDEO

* Fix proxy

* Add error proxy

* Update config.json

* Refactor user input handling and improve messaging in __init__.py

---------

Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
Co-authored-by: l1n00 <>

* Fix proxy __init__

* Update os.py

---------

Co-authored-by: l1n00 <delmolinonicola@gmail.com>
2025-05-18 14:16:44 +02:00
None
dfcc29078f Fix proxy (#319)
* Add ENABLE_VIDEO

* Fix proxy

* Add error proxy

* Update config.json
2025-05-17 09:54:41 +02:00
None
c0f3d8619b Bump v3.0.6 2025-05-14 09:36:08 +02:00
None
8e323e83f9 Dev (#318)
* Fix telegram bot (issues #305 bug) (#316)

* fix create config.json

* fix messagge telegram_bot option 0 (Streamingcommunity)

* Update README.md

* Update domain

---------

Co-authored-by: GiuPic <47813665+GiuPic@users.noreply.github.com>
2025-05-14 09:34:30 +02:00
None
e75d8185f9 Site: Fix color map 2025-05-13 12:33:51 +02:00
None
a071d0d2c4 Bump v3.0.5 2025-05-13 11:37:56 +02:00
None
bfed63bd41 Site: add _deprecate 2025-05-13 11:04:42 +02:00
None
fab21e572c Fix cert path 2025-05-12 17:12:37 +02:00
55 changed files with 2581 additions and 1839 deletions

.github/.domain/domain_update.py vendored Normal file

@@ -0,0 +1,360 @@
# 20.04.2024
import os
import json
from datetime import datetime
from urllib.parse import urlparse, unquote
# External libraries
import httpx
import tldextract
import ua_generator
import dns.resolver
# Variables
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
JSON_FILE_PATH = os.path.join(SCRIPT_DIR, "domains.json")
ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge'))
def get_headers():
return ua.headers.get()
def get_tld(url_str):
try:
parsed = urlparse(unquote(url_str))
domain = parsed.netloc.lower().removeprefix('www.')  # removeprefix, not lstrip: lstrip('www.') strips any leading 'w'/'.' characters
parts = domain.split('.')
return parts[-1] if len(parts) >= 2 else None
except Exception:
return None
def get_base_domain(url_str):
try:
parsed = urlparse(url_str)
domain = parsed.netloc.lower().removeprefix('www.')  # removeprefix, not lstrip: lstrip('www.') strips any leading 'w'/'.' characters
parts = domain.split('.')
return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0]
except Exception:
return None
def get_base_url(url_str):
try:
parsed = urlparse(url_str)
return f"{parsed.scheme}://{parsed.netloc}"
except Exception:
return None
def log(msg, level='INFO'):
levels = {
'INFO': '[ ]',
'SUCCESS': '[+]',
'WARNING': '[!]',
'ERROR': '[-]'
}
entry = f"{levels.get(level, '[?]')} {msg}"
print(entry)
def load_json_data(file_path):
if not os.path.exists(file_path):
log(f"Error: The file {file_path} was not found.", "ERROR")
return None
try:
with open(file_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
log(f"Error reading the file {file_path}: {e}", "ERROR")
return None
def save_json_data(file_path, data):
try:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
log(f"Data successfully saved to {file_path}", "SUCCESS")
except Exception as e:
log(f"Error saving the file {file_path}: {e}", "ERROR")
def parse_url(url):
if not url.startswith(('http://', 'https://')):
url = 'https://' + url
try:
extracted = tldextract.extract(url)
parsed = urlparse(url)
clean_url = f"{parsed.scheme}://{parsed.netloc}/"
full_domain = f"{extracted.domain}.{extracted.suffix}" if extracted.domain else extracted.suffix
domain_tld = extracted.suffix
result = {
'url': clean_url,
'full_domain': full_domain,
'domain': domain_tld,
'suffix': extracted.suffix,
'subdomain': extracted.subdomain or None
}
return result
except Exception as e:
log(f"Error parsing URL: {e}", "ERROR")
return None
def check_dns_resolution(domain):
try:
resolver = dns.resolver.Resolver()
resolver.timeout = 2
resolver.lifetime = 2
try:
answers = resolver.resolve(domain, 'A')
return str(answers[0])
except Exception:
try:
answers = resolver.resolve(domain, 'AAAA')
return str(answers[0])
except Exception:
pass
return None
except Exception:
return None
def find_new_domain(input_url, output_file=None, verbose=True, json_output=False):
log_buffer = []
original_info = parse_url(input_url)
if not original_info:
log(f"Could not parse original URL: {input_url}", "ERROR")
if json_output:
return {'full_url': input_url, 'domain': None}
return None
log(f"Starting analysis for: {original_info['full_domain']}")
orig_ip = check_dns_resolution(original_info['full_domain'])
if orig_ip:
log(f"Original domain resolves to: {orig_ip}", "SUCCESS")
else:
log(f"Original domain does not resolve to an IP address", "WARNING")
headers = get_headers()
new_domains = []
redirects = []
final_url = None
final_domain_info = None
url_to_test_in_loop = None
for protocol in ['https://', 'http://']:
try:
url_to_test_in_loop = f"{protocol}{original_info['full_domain']}"
log(f"Testing connectivity to {url_to_test_in_loop}")
redirect_chain = []
current_url = url_to_test_in_loop
max_redirects = 10
redirect_count = 0
while redirect_count < max_redirects:
with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
response = client.get(current_url, headers=headers)
redirect_info = {'url': current_url, 'status_code': response.status_code}
redirect_chain.append(redirect_info)
log(f"Request to {current_url} - Status: {response.status_code}")
if response.status_code in (301, 302, 303, 307, 308):
if 'location' in response.headers:
next_url = response.headers['location']
if next_url.startswith('/'):
parsed_current = urlparse(current_url)
next_url = f"{parsed_current.scheme}://{parsed_current.netloc}{next_url}"
log(f"Redirect found: {next_url} (Status: {response.status_code})")
current_url = next_url
redirect_count += 1
redirect_domain_info_val = parse_url(next_url)
if redirect_domain_info_val and redirect_domain_info_val['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'})
else:
log(f"Redirect status code but no Location header", "WARNING")
break
else:
break
if redirect_chain:
final_url = redirect_chain[-1]['url']
final_domain_info = parse_url(final_url)
redirects.extend(redirect_chain)
log(f"Final URL after redirects: {final_url}", "SUCCESS")
if final_domain_info and final_domain_info['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': final_domain_info['full_domain'], 'url': final_url, 'source': 'final_url'})
final_status = redirect_chain[-1]['status_code'] if redirect_chain else None
if final_status and final_status < 400 and final_status != 403:
break
if final_status == 403 and redirect_chain and len(redirect_chain) > 1:
log(f"Got 403 Forbidden, but captured {len(redirect_chain)-1} redirects before that", "SUCCESS")
break
except httpx.RequestError as e:
log(f"Error connecting to {protocol}{original_info['full_domain']}: {str(e)}", "ERROR")
url_for_auto_redirect = input_url
if url_to_test_in_loop:
url_for_auto_redirect = url_to_test_in_loop
elif original_info and original_info.get('url'):
url_for_auto_redirect = original_info['url']
if not redirects or not new_domains:
log("Trying alternate method with automatic redirect following")
try:
with httpx.Client(verify=False, follow_redirects=True, timeout=5) as client:
response_auto = client.get(url_for_auto_redirect, headers=headers)
log(f"Connected with auto-redirects: Status {response_auto.status_code}")
if response_auto.history:
log(f"Found {len(response_auto.history)} redirects with auto-following", "SUCCESS")
for r_hist in response_auto.history:
redirect_info_auto = {'url': str(r_hist.url), 'status_code': r_hist.status_code}
redirects.append(redirect_info_auto)
log(f"Auto-redirect: {r_hist.url} (Status: {r_hist.status_code})")
final_url = str(response_auto.url)
final_domain_info = parse_url(final_url)
for redirect_hist_item in response_auto.history:
redirect_domain_val = parse_url(str(redirect_hist_item.url))
if redirect_domain_val and original_info and redirect_domain_val['full_domain'] != original_info['full_domain']:
new_domains.append({'domain': redirect_domain_val['full_domain'], 'url': str(redirect_hist_item.url), 'source': 'auto-redirect'})
current_final_url_info = parse_url(str(response_auto.url))
if current_final_url_info and original_info and current_final_url_info['full_domain'] != original_info['full_domain']:
is_already_added = any(d['domain'] == current_final_url_info['full_domain'] and d['source'] == 'auto-redirect' for d in new_domains)
if not is_already_added:
new_domains.append({'domain': current_final_url_info['full_domain'], 'url': str(response_auto.url), 'source': 'final_url_auto'})
final_url = str(response_auto.url)
final_domain_info = current_final_url_info
log(f"Final URL from auto-redirect: {final_url}", "SUCCESS")
except httpx.RequestError as e:
log(f"Error with auto-redirect attempt: {str(e)}", "ERROR")
except NameError:
log(f"Error: URL for auto-redirect attempt was not defined.", "ERROR")
unique_domains = []
seen_domains = set()
for domain_info_item in new_domains:
if domain_info_item['domain'] not in seen_domains:
seen_domains.add(domain_info_item['domain'])
unique_domains.append(domain_info_item)
if not final_url:
final_url = input_url
if not final_domain_info:
final_domain_info = original_info
if final_domain_info:
parsed_final_url_info = parse_url(final_url)
if parsed_final_url_info:
final_url = parsed_final_url_info['url']
final_domain_info = parsed_final_url_info
else:
final_domain_info = original_info
final_url = original_info['url'] if original_info else input_url
results_original_domain = original_info['full_domain'] if original_info else None
results_final_domain_tld = final_domain_info['domain'] if final_domain_info and 'domain' in final_domain_info else None
results = {
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'original_url': input_url,
'original_domain': results_original_domain,
'original_ip': orig_ip,
'new_domains': unique_domains,
'redirects': redirects,
'log': log_buffer
}
simplified_json_output = {'full_url': final_url, 'domain': results_final_domain_tld}
if verbose:
log(f"DEBUG - Simplified output: {simplified_json_output}", "INFO")
if output_file:
try:
with open(output_file, 'w', encoding='utf-8') as f:
json.dump(results, f, indent=2, ensure_ascii=False)
log(f"Results saved to {output_file}", "SUCCESS")
except Exception as e:
log(f"Error writing to output file: {str(e)}", "ERROR")
if json_output:
return simplified_json_output
else:
return results
def update_site_entry(site_name: str, all_domains_data: dict):
site_config = all_domains_data.get(site_name, {})
log(f"Processing site: {site_name}", "INFO")
if not site_config.get('full_url'):
log(f"Site {site_name} has no full_url in config. Skipping.", "WARNING")
return False
current_full_url = site_config.get('full_url')
current_domain_tld = site_config.get('domain')
found_domain_info = find_new_domain(current_full_url, verbose=False, json_output=True)
if found_domain_info and found_domain_info.get('full_url') and found_domain_info.get('domain'):
new_full_url = found_domain_info['full_url']
new_domain_tld = found_domain_info['domain']
if new_full_url != current_full_url or new_domain_tld != current_domain_tld:
log(f"Update found for {site_name}: URL '{current_full_url}' -> '{new_full_url}', TLD '{current_domain_tld}' -> '{new_domain_tld}'", "SUCCESS")
updated_entry = site_config.copy()
updated_entry['full_url'] = new_full_url
updated_entry['domain'] = new_domain_tld
if new_domain_tld != current_domain_tld:
updated_entry['old_domain'] = current_domain_tld if current_domain_tld else ""
updated_entry['time_change'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
all_domains_data[site_name] = updated_entry
return True
else:
log(f"No changes detected for {site_name}.", "INFO")
return False
else:
log(f"Could not reliably find new domain info for {site_name} from URL: {current_full_url}. No search fallback.", "WARNING")
return False
def main():
log("Starting domain update script...")
all_domains_data = load_json_data(JSON_FILE_PATH)
if not all_domains_data:
log("Cannot proceed: Domain data is missing or could not be loaded.", "ERROR")
log("Script finished.")
return
any_updates_made = False
for site_name_key in list(all_domains_data.keys()):
if update_site_entry(site_name_key, all_domains_data):
any_updates_made = True
print("\n")
if any_updates_made:
save_json_data(JSON_FILE_PATH, all_domains_data)
log("Update complete. Some entries were modified.", "SUCCESS")
else:
log("Update complete. No domains were modified.", "INFO")
log("Script finished.")
if __name__ == "__main__":
main()
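For orientation, a minimal sketch (not part of the diff) of how the new script's helpers compose when imported rather than executed through `main()`; the module name `domain_update` and the example site key are assumptions:

```python
# Sketch only: exercising the updater's helpers by hand.
from domain_update import JSON_FILE_PATH, find_new_domain, load_json_data

sites = load_json_data(JSON_FILE_PATH)  # dict keyed by site name, or None on error
if sites:
    entry = sites["streamingcommunity"]
    # json_output=True returns the simplified {'full_url': ..., 'domain': ...} shape
    info = find_new_domain(entry["full_url"], verbose=False, json_output=True)
    print(info)  # e.g. {'full_url': 'https://streamingunity.bid/', 'domain': 'bid'}
```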

.github/.domain/domains.json vendored Normal file

@@ -0,0 +1,62 @@
{
"1337xx": {
"domain": "to",
"full_url": "https://www.1337xx.to/",
"old_domain": "to",
"time_change": "2025-03-19 12:20:19"
},
"cb01new": {
"domain": "life",
"full_url": "https://cb01net.life/",
"old_domain": "download",
"time_change": "2025-06-01 01:02:16"
},
"animeunity": {
"domain": "so",
"full_url": "https://www.animeunity.so/",
"old_domain": "so",
"time_change": "2025-03-19 12:20:23"
},
"animeworld": {
"domain": "ac",
"full_url": "https://www.animeworld.ac/",
"old_domain": "ac",
"time_change": "2025-03-21 12:20:27"
},
"guardaserie": {
"domain": "meme",
"full_url": "https://guardaserie.meme/",
"old_domain": "meme",
"time_change": "2025-03-19 12:20:24"
},
"ddlstreamitaly": {
"domain": "co",
"full_url": "https://ddlstreamitaly.co/",
"old_domain": "co",
"time_change": "2025-03-19 12:20:26"
},
"streamingwatch": {
"domain": "org",
"full_url": "https://www.streamingwatch.org/",
"old_domain": "org",
"time_change": "2025-04-29 12:30:30"
},
"altadefinizione": {
"domain": "spa",
"full_url": "https://altadefinizione.spa/",
"old_domain": "locker",
"time_change": "2025-05-26 23:22:45"
},
"streamingcommunity": {
"domain": "bid",
"full_url": "https://streamingunity.bid/",
"old_domain": "bio",
"time_change": "2025-06-03 15:27:02"
},
"altadefinizionegratis": {
"domain": "cc",
"full_url": "https://altadefinizionegratis.cc/",
"old_domain": "icu",
"time_change": "2025-06-02 10:35:25"
}
}
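Each entry records the current TLD (`domain`), the working URL (`full_url`), the previous TLD (`old_domain`), and when it last changed (`time_change`). A short sketch of how a consumer reads one entry (the relative path assumes the repository root as working directory):

```python
# Sketch only: reading a single site record from domains.json.
import json

with open(".github/.domain/domains.json", encoding="utf-8") as f:
    domains = json.load(f)

entry = domains["streamingcommunity"]
print(entry["full_url"])    # current working URL, e.g. "https://streamingunity.bid/"
print(entry["old_domain"])  # previous TLD, kept for history ("bio")
```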

.github/.site/css/style.css vendored Normal file

@@ -0,0 +1,560 @@
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
:root {
--primary-color: #8c52ff;
--secondary-color: #6930c3;
--accent-color: #00e5ff;
--background-color: #121212;
--card-background: #1e1e1e;
--text-color: #f8f9fa;
--shadow-color: rgba(0, 0, 0, 0.25);
--card-hover: #2a2a2a;
--border-color: #333333;
}
[data-theme="light"] {
--background-color: #ffffff;
--card-background: #f8f9fa;
--text-color: #212529;
--shadow-color: rgba(0, 0, 0, 0.1);
--card-hover: #e9ecef;
--border-color: #dee2e6;
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
transition: all 0.2s ease;
}
body {
font-family: 'Inter', 'Segoe UI', sans-serif;
background-color: var(--background-color);
color: var(--text-color);
line-height: 1.6;
min-height: 100vh;
display: flex;
flex-direction: column;
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex: 1;
}
.header-container {
display: flex;
justify-content: space-between;
align-items: center;
padding: 15px 20px;
background: var(--card-background);
border-radius: 12px;
border: 1px solid var(--border-color);
margin-bottom: 20px;
}
.sites-stats {
display: flex;
gap: 20px;
align-items: center;
}
.total-sites, .last-update-global {
display: flex;
align-items: center;
gap: 8px;
color: var(--text-color);
font-size: 0.95rem;
background: var(--background-color);
padding: 8px 16px;
border-radius: 8px;
border: 1px solid var(--border-color);
transition: all 0.3s ease;
}
.total-sites:hover, .last-update-global:hover {
border-color: var(--primary-color);
transform: translateY(-2px);
}
.total-sites i, .last-update-global i {
color: var(--primary-color);
font-size: 1.1rem;
}
.site-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 24px;
padding: 2rem 0;
}
.site-item {
min-height: 220px;
background-color: var(--card-background);
border-radius: 16px;
padding: 30px;
box-shadow: 0 6px 20px var(--shadow-color);
transition: all 0.3s ease;
display: flex;
flex-direction: column;
align-items: center;
border: 1px solid var(--border-color);
position: relative;
overflow: hidden;
cursor: pointer;
}
.site-item::before {
content: '';
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 4px;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
transition: height 0.3s ease;
}
.site-item:hover {
transform: translateY(-5px);
box-shadow: 0 12px 30px var(--shadow-color);
border-color: var(--primary-color);
}
.site-item:hover::before {
height: 6px;
}
.site-item img {
width: 80px;
height: 80px;
margin-bottom: 1.5rem;
border-radius: 16px;
object-fit: cover;
border: 2px solid var(--border-color);
transition: transform 0.3s ease;
}
.site-item:hover img {
transform: scale(1.05);
}
.site-item h3 {
font-size: 1.4rem;
font-weight: 600;
margin-bottom: 0.5rem;
color: var(--primary-color);
text-align: center;
transition: color 0.3s ease;
}
.site-item:hover h3 {
color: var(--accent-color);
}
.site-info {
display: flex;
flex-direction: column;
align-items: center;
gap: 8px;
margin-top: 10px;
text-align: center;
font-size: 0.85rem;
color: var(--text-color);
opacity: 0.8;
}
.last-update, .old-domain {
display: flex;
align-items: center;
gap: 6px;
}
.last-update i, .old-domain i {
color: var(--primary-color);
}
.site-item:hover .site-info {
opacity: 1;
}
.site-status {
position: absolute;
top: 10px;
right: 10px;
width: 12px;
height: 12px;
border-radius: 50%;
background: #4CAF50;
}
.site-status.offline {
background: #f44336;
}
.status-indicator {
position: fixed;
top: 20px;
right: 20px;
background: var(--card-background);
border: 1px solid var(--border-color);
border-radius: 12px;
padding: 15px 20px;
box-shadow: 0 4px 20px var(--shadow-color);
z-index: 1001;
min-width: 280px;
max-width: 400px;
transition: all 0.3s ease;
}
.status-indicator.hidden {
opacity: 0;
transform: translateY(-20px);
pointer-events: none;
}
.status-header {
display: flex;
align-items: center;
gap: 10px;
margin-bottom: 15px;
font-weight: 600;
color: var(--primary-color);
}
.status-icon {
width: 20px;
height: 20px;
border: 2px solid var(--primary-color);
border-radius: 50%;
border-top-color: transparent;
animation: spin 1s linear infinite;
}
.status-icon.ready {
border: none;
background: #4CAF50;
animation: none;
position: relative;
}
.status-icon.ready::after {
content: '✓';
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white;
font-size: 12px;
font-weight: bold;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
.status-text {
color: var(--text-color);
font-size: 0.9rem;
margin-bottom: 10px;
}
.checking-sites {
max-height: 200px;
overflow-y: auto;
background: var(--background-color);
border-radius: 8px;
padding: 10px;
border: 1px solid var(--border-color);
}
.checking-site {
display: flex;
align-items: center;
justify-content: space-between;
gap: 10px;
padding: 6px 8px;
margin-bottom: 4px;
border-radius: 6px;
background: var(--card-background);
font-size: 0.8rem;
color: var(--text-color);
transition: all 0.2s ease;
}
.checking-site.completed {
opacity: 0.6;
background: var(--card-hover);
}
.checking-site.online {
border-left: 3px solid #4CAF50;
}
.checking-site.offline {
border-left: 3px solid #f44336;
}
.checking-site .site-name {
flex: 1;
font-weight: 500;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.checking-site .site-status-icon {
width: 12px;
height: 12px;
border-radius: 50%;
flex-shrink: 0;
}
.checking-site .site-status-icon.checking {
background: var(--primary-color);
animation: pulse 1s infinite;
}
.checking-site .site-status-icon.online {
background: #4CAF50;
}
.checking-site .site-status-icon.offline {
background: #f44336;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.progress-bar {
width: 100%;
height: 6px;
background: var(--background-color);
border-radius: 3px;
overflow: hidden;
margin-top: 10px;
}
.progress-fill {
height: 100%;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
width: 0%;
transition: width 0.3s ease;
border-radius: 3px;
}
.loader {
width: 48px;
height: 48px;
border: 3px solid var(--primary-color);
border-bottom-color: transparent;
border-radius: 50%;
display: inline-block;
position: relative;
box-sizing: border-box;
animation: rotation 1s linear infinite;
}
.loader::after {
content: '';
position: absolute;
box-sizing: border-box;
left: 0;
top: 0;
width: 48px;
height: 48px;
border-radius: 50%;
border: 3px solid transparent;
border-bottom-color: var(--accent-color);
animation: rotationBack 0.5s linear infinite;
transform: rotate(45deg);
}
@keyframes rotation {
0% { transform: rotate(0deg) }
100% { transform: rotate(360deg) }
}
@keyframes rotationBack {
0% { transform: rotate(0deg) }
100% { transform: rotate(-360deg) }
}
footer {
background: var(--card-background);
border-top: 1px solid var(--border-color);
margin-top: auto;
padding: 40px 20px;
position: relative;
}
.footer-content {
max-width: 1200px;
margin: 0 auto;
display: grid;
grid-template-columns: repeat(3, 1fr);
gap: 30px;
position: relative;
padding: 20px;
}
.footer-section {
padding: 20px;
border-radius: 12px;
transition: transform 0.3s ease, background-color 0.3s ease;
background-color: var(--card-background);
border: 1px solid var(--border-color);
}
.footer-section:hover {
transform: translateY(-5px);
background-color: var(--card-hover);
}
.footer-title {
color: var(--accent-color);
font-size: 1.3rem;
margin-bottom: 1.5rem;
padding-bottom: 0.5rem;
position: relative;
letter-spacing: 0.5px;
}
.footer-title::after {
content: '';
position: absolute;
bottom: 0;
left: 0;
width: 60px;
height: 3px;
border-radius: 2px;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
}
.footer-links {
list-style: none;
}
.footer-links li {
margin-bottom: 0.8rem;
}
.footer-links a {
color: var(--text-color);
text-decoration: none;
display: flex;
align-items: center;
gap: 8px;
opacity: 0.8;
transition: all 0.3s ease;
padding: 8px 12px;
border-radius: 8px;
background-color: transparent;
}
.footer-links a:hover {
opacity: 1;
color: var(--accent-color);
transform: translateX(8px);
background-color: rgba(140, 82, 255, 0.1);
}
.footer-links i {
width: 20px;
text-align: center;
font-size: 1.2rem;
color: var(--primary-color);
transition: transform 0.3s ease;
}
.footer-links a:hover i {
transform: scale(1.2);
}
.footer-description {
margin-top: 15px;
font-size: 0.9rem;
color: var(--text-color);
opacity: 0.8;
line-height: 1.5;
}
.update-note {
color: var(--accent-color);
font-size: 0.9rem;
opacity: 0.9;
}
/* Responsiveness */
@media (max-width: 768px) {
.site-grid {
grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
gap: 15px;
padding: 1rem;
}
.site-item {
min-height: 250px;
padding: 20px;
}
.footer-content {
grid-template-columns: 1fr;
gap: 20px;
padding: 15px;
text-align: center;
}
.header-container {
flex-direction: column;
gap: 15px;
}
.sites-stats {
flex-direction: column;
width: 100%;
}
.total-sites, .last-update-global {
width: 100%;
justify-content: center;
}
.footer-title::after {
left: 50%;
transform: translateX(-50%);
}
.footer-links a {
justify-content: center;
}
.footer-links a:hover {
transform: translateY(-5px);
}
.footer-section {
margin-bottom: 20px;
}
}
@media (max-width: 480px) {
.site-grid {
grid-template-columns: 1fr;
}
.site-item {
min-height: 220px;
}
.container {
padding: 10px;
}
}


@@ -9,9 +9,19 @@
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
</head>
<body>
<main>
<section class="container">
<body> <main>
<section class="container"> <div class="header-container">
<div class="sites-stats">
<span class="total-sites">
<i class="fas fa-globe"></i>
Total Sites: <span id="sites-count">0</span>
</span>
<span class="last-update-global">
<i class="fas fa-clock"></i>
Last Update: <span id="last-update-time">-</span>
</span>
</div>
</div>
<div class="sites-container">
<div id="site-list" class="site-grid">
<div class="loader"></div>
@@ -21,8 +31,7 @@
</main>
<footer>
<div class="footer-content">
<div class="footer-section">
<div class="footer-content"> <div class="footer-section">
<h3 class="footer-title">Repository</h3>
<ul class="footer-links">
<li>
@@ -33,22 +42,22 @@
</li>
</ul>
<p class="footer-description">
An updated collection of streaming sites. Contribute to the project on GitHub!
An open-source script for downloading movies, TV shows, and anime from various websites.
</p>
</div>
<div class="footer-section">
<h3 class="footer-title">Author</h3>
<h3 class="footer-title">Support</h3>
<ul class="footer-links">
<li>
<a href="https://github.com/Arrowar" target="_blank" rel="noopener noreferrer">
<i class="fas fa-user-tie"></i>
Arrowar Profile
<a href="https://www.paypal.com/donate/?hosted_button_id=UXTWMT8P6HE2C" target="_blank" rel="noopener noreferrer">
<i class="fab fa-paypal"></i>
Donate with PayPal
</a>
</li>
</ul>
<p class="footer-description">
Developer of the project.
Support the development of this project through donations.
</p>
</div>
@@ -71,4 +80,4 @@
<script src="js/script.js"></script>
</body>
</html>
</html>

.github/.site/js/script.js vendored Normal file

@@ -0,0 +1,245 @@
document.documentElement.setAttribute('data-theme', 'dark');
let statusIndicator = null;
let checkingSites = new Map();
let totalSites = 0;
let completedSites = 0;
function createStatusIndicator() {
statusIndicator = document.createElement('div');
statusIndicator.className = 'status-indicator';
statusIndicator.innerHTML = `
<div class="status-header">
<div class="status-icon"></div>
<span class="status-title">Loading Sites...</span>
</div>
<div class="status-text">Initializing site checks...</div>
<div class="progress-bar">
<div class="progress-fill"></div>
</div>
<div class="checking-sites"></div>
`;
document.body.appendChild(statusIndicator);
return statusIndicator;
}
function updateStatusIndicator(status, text, progress = 0) {
if (!statusIndicator) return;
const statusIcon = statusIndicator.querySelector('.status-icon');
const statusTitle = statusIndicator.querySelector('.status-title');
const statusText = statusIndicator.querySelector('.status-text');
const progressFill = statusIndicator.querySelector('.progress-fill');
statusTitle.textContent = status;
statusText.textContent = text;
progressFill.style.width = `${progress}%`;
if (status === 'Ready') {
statusIcon.classList.add('ready');
setTimeout(() => {
statusIndicator.classList.add('hidden');
setTimeout(() => statusIndicator.remove(), 300);
}, 2000);
}
}
function addSiteToCheck(siteName, siteUrl) {
if (!statusIndicator) return;
const checkingSitesContainer = statusIndicator.querySelector('.checking-sites');
const siteElement = document.createElement('div');
siteElement.className = 'checking-site';
siteElement.innerHTML = `
<span class="site-name">${siteName}</span>
<div class="site-status-icon checking"></div>
`;
checkingSitesContainer.appendChild(siteElement);
checkingSites.set(siteName, siteElement);
}
function updateSiteStatus(siteName, isOnline) {
const siteElement = checkingSites.get(siteName);
if (!siteElement) return;
const statusIcon = siteElement.querySelector('.site-status-icon');
statusIcon.classList.remove('checking');
statusIcon.classList.add(isOnline ? 'online' : 'offline');
siteElement.classList.add('completed', isOnline ? 'online' : 'offline');
completedSites++;
const progress = (completedSites / totalSites) * 100;
updateStatusIndicator(
'Checking Sites...',
`Checked ${completedSites}/${totalSites} sites`,
progress
);
}
async function checkSiteStatus(url, siteName) {
try {
console.log(`Checking status for: ${url}`);
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 3000);
const response = await fetch(url, {
method: 'HEAD',
mode: 'no-cors',
signal: controller.signal,
headers: {
'Accept': 'text/html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/133.0.0.0'
}
});
clearTimeout(timeoutId);
const isOnline = response.type === 'opaque'; // with mode: 'no-cors', a reachable host yields an opaque response; failures throw instead
console.log(`Site ${url} is ${isOnline ? 'online' : 'offline'} (Type: ${response.type})`);
if (siteName) {
updateSiteStatus(siteName, isOnline);
}
return isOnline;
} catch (error) {
console.log(`Error checking ${url}:`, error.message);
if (siteName) {
updateSiteStatus(siteName, false);
}
return false;
}
}
const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json'; // raw view of the file the update_domain workflow keeps current
async function loadSiteData() {
try {
console.log('Starting to load site data from GitHub...');
createStatusIndicator();
updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0);
const siteList = document.getElementById('site-list');
console.log(`Fetching from GitHub: ${domainsJsonUrl}`);
const response = await fetch(domainsJsonUrl);
if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
const configSite = await response.json(); // Directly get the site data object
siteList.innerHTML = '';
if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object
totalSites = Object.keys(configSite).length;
completedSites = 0;
let latestUpdate = new Date(0);
document.getElementById('sites-count').textContent = totalSites;
updateStatusIndicator('Checking Sites...', `Starting checks for ${totalSites} sites...`, 0);
Object.entries(configSite).forEach(([siteName, site]) => {
addSiteToCheck(siteName, site.full_url);
});
const statusChecks = Object.entries(configSite).map(async ([siteName, site]) => {
const isOnline = await checkSiteStatus(site.full_url, siteName);
return { siteName, site, isOnline };
});
const results = await Promise.all(statusChecks);
updateStatusIndicator('Ready', 'All sites checked successfully!', 100);
results.forEach(({ siteName, site, isOnline }) => {
const siteItem = document.createElement('div');
siteItem.className = 'site-item';
siteItem.style.cursor = 'pointer';
const statusDot = document.createElement('div');
statusDot.className = 'site-status';
if (!isOnline) statusDot.classList.add('offline');
siteItem.appendChild(statusDot);
const updateTime = new Date(site.time_change);
if (updateTime > latestUpdate) {
latestUpdate = updateTime;
}
const siteInfo = document.createElement('div');
siteInfo.className = 'site-info';
if (site.time_change) {
const updateDate = new Date(site.time_change);
const formattedDate = updateDate.toLocaleDateString('it-IT', {
year: 'numeric',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit'
});
const lastUpdate = document.createElement('span');
lastUpdate.className = 'last-update';
lastUpdate.innerHTML = `<i class="fas fa-clock"></i> ${formattedDate}`;
siteInfo.appendChild(lastUpdate);
}
if (site.old_domain) {
const oldDomain = document.createElement('span');
oldDomain.className = 'old-domain';
oldDomain.innerHTML = `<i class="fas fa-history"></i> ${site.old_domain}`;
siteInfo.appendChild(oldDomain);
}
siteItem.addEventListener('click', function() {
window.open(site.full_url, '_blank', 'noopener,noreferrer');
});
const siteIcon = document.createElement('img');
siteIcon.src = `https://t2.gstatic.com/faviconV2?client=SOCIAL&type=FAVICON&fallback_opts=TYPE,SIZE,URL&url=${site.full_url}&size=128`;
siteIcon.alt = `${siteName} icon`;
siteIcon.onerror = function() {
this.src = 'data:image/svg+xml;utf8,<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 24 24" fill="none" stroke="%238c52ff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z"></path></svg>';
};
const siteTitle = document.createElement('h3');
siteTitle.textContent = siteName;
siteItem.appendChild(siteIcon);
siteItem.appendChild(siteTitle);
siteItem.appendChild(siteInfo);
siteList.appendChild(siteItem);
});
const formattedDate = latestUpdate.toLocaleDateString('it-IT', {
year: 'numeric',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit'
});
document.getElementById('last-update-time').textContent = formattedDate;
} else {
siteList.innerHTML = '<div class="no-sites">No sites available</div>';
updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
}
} catch (error) {
console.error('Error loading site data:', error);
const siteList = document.getElementById('site-list'); // re-query: the const declared inside the try block is out of scope here
siteList.innerHTML = `
<div class="error-message">
<p>Loading failed</p>
<button onclick="loadSiteData()" class="retry-button">Retry</button>
</div>
`;
if (statusIndicator) {
updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0);
statusIndicator.querySelector('.status-icon').style.background = '#f44336';
}
}
}
document.addEventListener('DOMContentLoaded', () => {
loadSiteData();
});
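The page infers "online" purely from whether the cross-origin `HEAD` request completes at all, since `no-cors` hides the status code. A server-side analogue of the same check, sketched in the style of `domain_update.py` (httpx, short timeout; any HTTP response counts as reachable):

```python
# Sketch only: the page's reachability check, done server-side.
import httpx

def is_online(url: str, timeout: float = 3.0) -> bool:
    try:
        # Any response at all means the host answered; errors/timeouts raise.
        httpx.head(url, timeout=timeout, follow_redirects=True)
        return True
    except httpx.RequestError:
        return False

print(is_online("https://www.1337xx.to/"))
```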


@@ -1,92 +0,0 @@
name: Build Dev Branch
on:
workflow_dispatch:
push:
branches:
- "dev"
jobs:
build:
strategy:
matrix:
include:
- os: windows-latest
artifact_name: dev_StreamingCommunity_win
executable: dev_StreamingCommunity_win.exe
separator: ';'
- os: macos-latest
artifact_name: dev_StreamingCommunity_mac
executable: dev_StreamingCommunity_mac
separator: ':'
- os: ubuntu-latest
artifact_name: dev_StreamingCommunity_linux_latest
executable: dev_StreamingCommunity_linux_latest
separator: ':'
- os: ubuntu-22.04
artifact_name: dev_StreamingCommunity_linux_previous
executable: dev_StreamingCommunity_linux_previous
separator: ':'
runs-on: ${{ matrix.os }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get the latest release tag
id: get_latest_release
shell: bash
run: |
latest_tag=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .tag_name)
echo "latest_tag=$latest_tag" >> $GITHUB_ENV
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.12'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade certifi
python -m pip install -r requirements.txt
python -m pip install pyinstaller
- name: Build executable with PyInstaller
shell: bash
run: |
pyinstaller --onefile --hidden-import=pycryptodomex --hidden-import=ua_generator \
--hidden-import=qbittorrentapi --hidden-import=qbittorrent \
--hidden-import=bs4 --hidden-import=httpx --hidden-import=rich --hidden-import=tqdm \
--hidden-import=m3u8 --hidden-import=psutil --hidden-import=unidecode \
--hidden-import=jsbeautifier --hidden-import=jsbeautifier.core \
--hidden-import=jsbeautifier.javascript --hidden-import=jsbeautifier.javascript.beautifier \
--hidden-import=jsbeautifier.unpackers --hidden-import=jsbeautifier.unpackers.packer \
--hidden-import=jsbeautifier.unpackers.javascriptobfuscator \
--hidden-import=jsbeautifier.unpackers.myobfuscate \
--hidden-import=jsbeautifier.unpackers.urlencode \
--hidden-import=jsbeautifier.unpackers.meshim \
--hidden-import=editorconfig --hidden-import=editorconfig.handlers \
--hidden-import=six --hidden-import=pathvalidate \
--hidden-import=Cryptodome.Cipher --hidden-import=Cryptodome.Cipher.AES \
--hidden-import=Cryptodome.Util --hidden-import=Cryptodome.Util.Padding \
--hidden-import=Cryptodome.Random \
--hidden-import=telebot \
--additional-hooks-dir=pyinstaller/hooks \
--add-data "StreamingCommunity${{ matrix.separator }}StreamingCommunity" \
--name=${{ matrix.artifact_name }} test_run.py
- name: Upload executable to latest release
uses: softprops/action-gh-release@v1
with:
tag_name: ${{ env.latest_tag }}
files: dist/${{ matrix.executable }}
prerelease: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -51,7 +51,7 @@ jobs:
build:
if: startsWith(github.ref_name, 'v') || (github.event_name == 'workflow_dispatch' && github.event.inputs.publish_pypi == 'false')
strategy:
matrix:
include:
@@ -59,17 +59,17 @@ jobs:
artifact_name: StreamingCommunity_win
executable: StreamingCommunity_win.exe
separator: ';'
- os: macos-latest
artifact_name: StreamingCommunity_mac
executable: StreamingCommunity_mac
separator: ':'
- os: ubuntu-latest
artifact_name: StreamingCommunity_linux_latest
executable: StreamingCommunity_linux_latest
separator: ':'
- os: ubuntu-22.04
artifact_name: StreamingCommunity_linux_previous
executable: StreamingCommunity_linux_previous
@@ -138,4 +138,4 @@ jobs:
tag_name: ${{ env.latest_tag }}
files: dist/${{ matrix.executable }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -25,7 +25,7 @@ jobs:
- name: Copy site files
run: |
mkdir -p _site
cp -r Test/.site/* _site/
cp -r .github/.site/* _site/
ls -la _site/
- name: Upload artifact

.github/workflows/update_domain.yml vendored Normal file

@@ -0,0 +1,50 @@
name: Update domains
on:
schedule:
- cron: "0 7-21 * * *"  # once an hour, from 07:00 to 21:00 UTC
workflow_dispatch:
jobs:
update-domains:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install dependencies
run: |
pip install httpx tldextract ua-generator dnspython
pip install --upgrade pip setuptools wheel
- name: Configure DNS
run: |
sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
cat /etc/resolv.conf
- name: Execute domain update script
run: python .github/.domain/domain_update.py
- name: Commit and push changes (if any)
run: |
git config --global user.name 'github-actions[bot]'
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
# Check if domains.json was modified
if ! git diff --quiet .github/.domain/domains.json; then
git add .github/.domain/domains.json
git commit -m "Automatic domain update [skip ci]"
echo "Changes committed. Attempting to push..."
git push
else
echo "No changes to .github/.domain/domains.json to commit."
fi

.gitignore vendored

@@ -52,5 +52,4 @@ cmd.txt
bot_config.json
scripts.json
active_requests.json
domains.json
working_proxies.json


@@ -2,4 +2,4 @@ build-container:
docker build -t streaming-community-api .
run-container:
docker run --rm -it -p 8000:8000 -v ${LOCAL_DIR}:/app/Video -v ./config.json:/app/config.json streaming-community-api
docker run --rm -it --dns 9.9.9.9 -p 8000:8000 -v ${LOCAL_DIR}:/app/Video -v ./config.json:/app/config.json streaming-community-api

README.md

@@ -1,5 +1,5 @@
<p align="center">
<img src="https://i.ibb.co/v6RnT0wY/s2.jpg" alt="Project Logo" width="600"/>
<img src="https://i.ibb.co/v6RnT0wY/s2.jpg" alt="Project Logo" width="450"/>
</p>
<p align="center">
@@ -31,8 +31,11 @@
# 📋 Table of Contents
<details>
<summary>📦 Installation</summary>
- 🔄 [Update Domains](#update-domains)
- 🌐 [Available Sites](https://arrowar.github.io/StreamingDirectory/)
- 🌐 [Available Sites](https://arrowar.github.io/StreamingCommunity/)
- 🛠️ [Installation](#installation)
- 📦 [PyPI Installation](#1-pypi-installation)
- 🔄 [Automatic Installation](#2-automatic-installation)
@@ -40,6 +43,11 @@
- 📝 [Manual Installation](#3-manual-installation)
- 💻 [Win 7](https://github.com/Ghost6446/StreamingCommunity_api/wiki/Installation#win-7)
- 📱 [Termux](https://github.com/Ghost6446/StreamingCommunity_api/wiki/Termux)
</details>
<details>
<summary>⚙️ Configuration & Usage</summary>
- ⚙️ [Configuration](#configuration)
- 🔧 [Default](#default-settings)
- 📩 [Request](#requests-settings)
@@ -48,15 +56,23 @@
- 📝 [Command](#command)
- 🔍 [Global search](#global-search)
- 💻 [Examples of terminal](#examples-of-terminal-usage)
</details>
<details>
<summary>🔧 Advanced Features</summary>
- 🔧 [Manual domain configuration](#update-domains)
- 🐳 [Docker](#docker)
- 📝 [Telegram Usage](#telegram-usage)
</details>
<details>
<summary> Help & Support</summary>
- 🎓 [Tutorial](#tutorials)
- 📝 [To do](#to-do)
- 💬 [Support](#support)
- 🤝 [Contribute](#contributing)
- ⚠️ [Disclaimer](#disclaimer)
- ⚡ [Contributors](#contributors)
</details>
# Installation
@@ -111,7 +127,8 @@ python run_streaming.py
## Modules
### HLS Downloader
<details>
<summary>📥 HLS Downloader</summary>
Download HTTP Live Streaming (HLS) content from m3u8 URLs.
@@ -129,8 +146,10 @@ downloader.download()
```
See [HLS example](./Test/Download/HLS.py) for complete usage.
</details>
### MP4 Downloader
<details>
<summary>📽️ MP4 Downloader</summary>
Direct MP4 file downloader with support for custom headers and referrer.
@@ -159,8 +178,10 @@ downloader.download()
```
See [MP4 example](./Test/Download/MP4.py) for complete usage.
</details>
### Torrent Client
<details>
<summary>🧲 Torrent Client</summary>
Download content via torrent magnet links.
@@ -178,67 +199,21 @@ client.start_download()
```
See [Torrent example](./Test/Download/TOR.py) for complete usage.
## 2. Automatic Installation
### Supported Operating Systems 💿
| OS | Automatic Installation Support |
|:----------------|:------------------------------:|
| Windows 10/11 | ✔️ |
| Windows 7 | ❌ |
| Debian Linux | ✔️ |
| Arch Linux | ✔️ |
| CentOS Stream 9 | ✔️ |
| FreeBSD | ⏳ |
| MacOS | ✔️ |
| Termux | ❌ |
### Installation Steps
#### On Windows:
```powershell
.\Installer\win_install.bat
```
#### On Linux/MacOS/BSD:
```bash
sudo chmod +x Installer/unix_install.sh && ./Installer/unix_install.sh
```
### Usage
#### On Windows:
```powershell
python .\test_run.py
```
or
```powershell
source .venv/bin/activate && python test_run.py && deactivate
```
#### On Linux/MacOS/BSD:
```bash
./test_run.py
```
</details>
## Binary Location
### Default Locations
<details>
<summary>📂 Default Locations</summary>
- **Windows**: `C:\binary`
- **MacOS**: `~/Applications/binary`
- **Linux**: `~/.local/bin/binary`
</details>
You can customize these locations by following these steps for your operating system:
<details>
<summary>🪟 Windows Configuration</summary>
#### Windows
1. Move the binary folder from `C:\binary` to your desired location
2. Add the new path to Windows environment variables:
- Open Start menu and search for "Environment Variables"
@@ -250,8 +225,11 @@ You can customize these locations by following these steps for your operating sy
- Click "OK" to save changes
For detailed Windows PATH instructions, see the [Windows PATH guide](https://www.eukhost.com/kb/how-to-add-to-the-path-on-windows-10-and-windows-11/).
</details>
<details>
<summary>🍎 MacOS Configuration</summary>
#### MacOS
1. Move the binary folder from `~/Applications/binary` to your desired location
2. Add the new path to your shell's configuration file:
```bash
@@ -269,8 +247,11 @@ For detailed Windows PATH instructions, see the [Windows PATH guide](https://www
# For zsh
source ~/.zshrc
```
</details>
<details>
<summary>🐧 Linux Configuration</summary>
#### Linux
1. Move the binary folder from `~/.local/bin/binary` to your desired location
2. Add the new path to your shell's configuration file:
```bash
@@ -286,6 +267,7 @@ For detailed Windows PATH instructions, see the [Windows PATH guide](https://www
# or
source ~/.zshrc # for zsh
```
</details>
> [!IMPORTANT]
> After moving the binary folder, ensure that all executables (ffmpeg, ffprobe, ffplay) are present in the new location and have the correct permissions:
@@ -294,19 +276,24 @@ For detailed Windows PATH instructions, see the [Windows PATH guide](https://www
## 3. Manual Installation
### Requirements 📋
<details>
<summary>📋 Requirements</summary>
Prerequisites:
* [Python](https://www.python.org/downloads/) > 3.8
* [FFmpeg](https://www.gyan.dev/ffmpeg/builds/)
</details>
### Install Python Dependencies
<details>
<summary>⚙️ Python Dependencies</summary>
```bash
pip install -r requirements.txt
```
</details>
### Usage
<details>
<summary>🚀 Usage</summary>
#### On Windows:
@@ -319,6 +306,7 @@
```bash
python3 test_run.py
```
</details>
## Update
@@ -338,278 +326,11 @@ python3 update.py
<br>
# Configuration
You can change some behaviors by tweaking the configuration file.
The configuration file is divided into several main sections:
## DEFAULT Settings
```json
{
"DEFAULT": {
"debug": false,
"show_message": true,
"clean_console": true,
"show_trending": true,
"use_api": true,
"not_close": false,
"telegram_bot": false,
"download_site_data": false,
"validate_github_config": false
}
}
```
- `debug`: Enables debug logging
- `show_message`: Displays informational messages
- `clean_console`: Clears the console between operations
- `show_trending`: Shows trending content
- `use_api`: Uses API for domain updates instead of local configuration
- `not_close`: If set to true, keeps the program running after download is complete
* Can be changed from terminal with `--not_close true/false`
- `telegram_bot`: Enables Telegram bot integration
- `download_site_data`: If set to false, disables automatic site data download
- `validate_github_config`: If set to false, disables validation and updating of configuration from GitHub
## OUT_FOLDER Settings
```json
{
"OUT_FOLDER": {
"root_path": "Video",
"movie_folder_name": "Movie",
"serie_folder_name": "Serie",
"anime_folder_name": "Anime",
"map_episode_name": "E%(episode)_%(episode_name)",
"add_siteName": false
}
}
```
- `root_path`: Directory where all videos will be saved
### Path examples:
* Windows: `C:\\MyLibrary\\Folder` or `\\\\MyServer\\MyLibrary` (if you want to use a network folder)
* Linux/MacOS: `Desktop/MyLibrary/Folder`
<br/><br/>
- `movie_folder_name`: The name of the subdirectory where movies will be stored
* Can be changed from terminal with `--movie_folder_name`
<br/><br/>
- `serie_folder_name`: The name of the subdirectory where TV series will be stored
* Can be changed from terminal with `--serie_folder_name`
<br/><br/>
- `anime_folder_name`: The name of the subdirectory where anime will be stored
* Can be changed from terminal with `--anime_folder_name`
<br/><br/>
- `map_episode_name`: Template for episode filenames
### Episode name usage:
You can choose different vars:
* `%(tv_name)` : Is the name of TV Show
* `%(season)` : Is the number of the season
* `%(episode)` : Is the number of the episode
* `%(episode_name)` : Is the name of the episode
* Can be changed from terminal with `--map_episode_name`
<br><br>
- `add_siteName`: If set to true, appends the site_name to the root path before the movie and serie folders
* Can be changed from terminal with `--add_siteName true/false`
<br/><br/>
## QBIT_CONFIG Settings
```json
{
"QBIT_CONFIG": {
"host": "192.168.1.51",
"port": "6666",
"user": "admin",
"pass": "adminadmin"
}
}
```
To enable qBittorrent integration, follow the setup guide [here](https://github.com/lgallard/qBittorrent-Controller/wiki/How-to-enable-the-qBittorrent-Web-UI).
## REQUESTS Settings
```json
{
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8
}
}
```
- `verify`: Verifies SSL certificates
- `timeout`: Maximum timeout (in seconds) for each request
- `max_retry`: Number of retry attempts per segment during M3U8 index download
## M3U8_DOWNLOAD Settings
```json
{
"M3U8_DOWNLOAD": {
"tqdm_delay": 0.01,
"default_video_workser": 12,
"default_audio_workser": 12,
"segment_timeout": 8,
"download_audio": true,
"merge_audio": true,
"specific_list_audio": [
"ita"
],
"download_subtitle": true,
"merge_subs": true,
"specific_list_subtitles": [
"ita",
"eng"
],
"cleanup_tmp_folder": true
}
}
```
- `tqdm_delay`: Delay between progress bar updates
- `default_video_workser`: Number of threads for video download
* Can be changed from terminal with `--default_video_worker <number>`
<br/><br/>
- `default_audio_workser`: Number of threads for audio download
* Can be changed from terminal with `--default_audio_worker <number>`
<br/><br/>
- `segment_timeout`: Timeout for downloading individual segments
- `download_audio`: Whether to download audio tracks
- `merge_audio`: Whether to merge audio with video
- `specific_list_audio`: List of audio languages to download
* Can be changed from terminal with `--specific_list_audio ita,eng`
<br/><br/>
- `download_subtitle`: Whether to download subtitles
- `merge_subs`: Whether to merge subtitles with video
- `specific_list_subtitles`: List of subtitle languages to download
* Can be changed from terminal with `--specific_list_subtitles ita,eng`
<br/><br/>
- `cleanup_tmp_folder`: Remove temporary .ts files after download
## Available Language Codes
| European | Asian | Middle Eastern | Others |
|-----------------|-----------------|-----------------|-----------------|
| ita - Italian | chi - Chinese | ara - Arabic | eng - English |
| spa - Spanish | jpn - Japanese | heb - Hebrew | por - Portuguese|
| fre - French | kor - Korean | tur - Turkish | fil - Filipino |
| ger - German | hin - Hindi | | ind - Indonesian|
| rus - Russian | mal - Malayalam | | may - Malay |
| swe - Swedish | tam - Tamil | | vie - Vietnamese|
| pol - Polish | tel - Telugu | | |
| ukr - Ukrainian | tha - Thai | | |
## M3U8_CONVERSION Settings
```json
{
"M3U8_CONVERSION": {
"use_codec": false,
"use_vcodec": true,
"use_acodec": true,
"use_bitrate": true,
"use_gpu": false,
"default_preset": "ultrafast"
}
}
```
- `use_codec`: Use specific codec settings
- `use_vcodec`: Use specific video codec
- `use_acodec`: Use specific audio codec
- `use_bitrate`: Apply bitrate settings
- `use_gpu`: Enable GPU acceleration (if available)
- `default_preset`: FFmpeg encoding preset (ultrafast, fast, medium, slow, etc.)
### Advanced M3U8 Conversion Options
The software supports various advanced encoding options via FFmpeg:
#### Encoding Presets
The `default_preset` configuration can be set to one of the following values:
- `ultrafast`: Extremely fast conversion but larger file size
- `superfast`: Very fast with good quality/size ratio
- `veryfast`: Fast with good compression
- `faster`: Optimal balance for most users
- `fast`: Good compression, moderate time
- `medium`: FFmpeg default setting
- `slow`: High quality, slower process
- `slower`: Very high quality, slow process
- `veryslow`: Maximum quality, very slow process
#### GPU Acceleration
When `use_gpu` is enabled, the system will use available hardware acceleration:
- NVIDIA: NVENC
- AMD: AMF
- Intel: QSV
You need to have updated drivers and FFmpeg compiled with hardware acceleration support.
## M3U8_PARSER Settings
```json
{
"M3U8_PARSER": {
"force_resolution": "Best",
"get_only_link": false
}
}
```
- `force_resolution`: Choose the video resolution for downloading:
* `"Best"`: Highest available resolution
* `"Worst"`: Lowest available resolution
* `"720p"`: Force 720p resolution
* Or specify one of these resolutions:
- 1080p (1920x1080)
- 720p (1280x720)
- 480p (640x480)
- 360p (640x360)
- 320p (480x320)
- 240p (426x240)
- 240p (320x240)
- 144p (256x144)
- `get_only_link`: Return M3U8 playlist/index URL instead of downloading
## SITE_EXTRA Settings
```json
{
"SITE_EXTRA": {
"ddlstreamitaly": {
"ips4_device_key": "",
"ips4_member_id": "",
"ips4_login_key": ""
}
}
}
```
- Site-specific configuration for `ddlstreamitaly`:
- `ips4_device_key`: Device key for authentication
- `ips4_member_id`: Member ID for authentication
- `ips4_login_key`: Login key for authentication
## Update Domains
<details>
<summary>🌐 Domain Configuration Methods</summary>
There are two ways to update the domains for the supported websites:
### 1. Using Local Configuration
@@ -645,23 +366,301 @@ Note: If `use_api` is set to `false` and no `domains.json` file is found, the sc
#### 💡 Adding a New Site to the Legacy API
If you want to add a new site to the legacy API, just message me on the Discord server, and I'll add it!
</details>
# Configuration
<details>
<summary>⚙️ Overview</summary>
You can change some behaviors by tweaking the configuration file. The configuration file is divided into several main sections.
</details>
<details>
<summary>🔧 DEFAULT Settings</summary>
```json
{
"DEFAULT": {
"debug": false,
"show_message": true,
"clean_console": true,
"show_trending": true,
"use_api": true,
"not_close": false,
"telegram_bot": false,
"download_site_data": false,
"validate_github_config": false
}
}
```
- `debug`: Enables debug logging
- `show_message`: Displays informational messages
- `clean_console`: Clears the console between operations
- `show_trending`: Shows trending content
- `use_api`: Uses API for domain updates instead of local configuration
- `not_close`: If set to true, keeps the program running after download is complete
* Can be changed from terminal with `--not_close true/false`
- `telegram_bot`: Enables Telegram bot integration
- `download_site_data`: If set to false, disables automatic site data download
- `validate_github_config`: If set to false, disables validation and updating of configuration from GitHub
</details>
<details>
<summary>📁 OUT_FOLDER Settings</summary>
```json
{
"OUT_FOLDER": {
"root_path": "Video",
"movie_folder_name": "Movie",
"serie_folder_name": "Serie",
"anime_folder_name": "Anime",
"map_episode_name": "E%(episode)_%(episode_name)",
"add_siteName": false
}
}
```
#### Directory Configuration
- `root_path`: Directory where all videos will be saved
* Windows: `C:\\MyLibrary\\Folder` or `\\\\MyServer\\MyLibrary` (network folder)
* Linux/MacOS: `Desktop/MyLibrary/Folder`
#### Folder Names
- `movie_folder_name`: Subdirectory for movies (can be changed with `--movie_folder_name`)
- `serie_folder_name`: Subdirectory for TV series (can be changed with `--serie_folder_name`)
- `anime_folder_name`: Subdirectory for anime (can be changed with `--anime_folder_name`)
#### Episode Naming
- `map_episode_name`: Template for episode filenames
* `%(tv_name)`: Name of TV Show
* `%(season)`: Season number
* `%(episode)`: Episode number
* `%(episode_name)`: Episode name
* Can be changed with `--map_episode_name`
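As a concrete illustration with hypothetical values, the default template `E%(episode)_%(episode_name)` would turn episode 1, "Pilot", into `E1_Pilot` (whether numbers are zero-padded is up to the implementation):

```python
# Sketch only: expanding the default episode-name template.
template = "E%(episode)_%(episode_name)"
name = template.replace("%(episode)", "1").replace("%(episode_name)", "Pilot")
print(name)  # -> E1_Pilot
```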
#### Additional Options
- `add_siteName`: Appends site_name to root path (can be changed with `--add_siteName true/false`)
</details>
<details>
<summary>🔄 QBIT_CONFIG Settings</summary>
```json
{
"QBIT_CONFIG": {
"host": "192.168.1.51",
"port": "6666",
"user": "admin",
"pass": "adminadmin"
}
}
```
To enable qBittorrent integration, follow the setup guide [here](https://github.com/lgallard/qBittorrent-Controller/wiki/How-to-enable-the-qBittorrent-Web-UI).
</details>
<details>
<summary>📡 REQUESTS Settings</summary>
```json
{
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8,
"proxy": {
"http": "http://username:password@host:port",
"https": "https://username:password@host:port"
}
}
}
```
- `verify`: Verifies SSL certificates
- `timeout`: Maximum timeout (in seconds) for each request
- `max_retry`: Number of retry attempts per segment during M3U8 index download
- `proxy`: Proxy configuration for HTTP/HTTPS requests
* Set to empty string `""` to disable proxies (default)
* Example with authentication:
```json
"proxy": {
"http": "http://username:password@host:port",
"https": "https://username:password@host:port"
}
```
* Example without authentication:
```json
"proxy": {
"http": "http://host:port",
"https": "https://host:port"
}
```
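Internally, modules read these values through the project's `config_manager`; for instance, the timeout above is fetched with the same call that appears in the source files later in this diff:
```python
from StreamingCommunity.Util.config_json import config_manager

# Reads REQUESTS.timeout from config.json
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
```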
</details>
<details>
<summary>📥 M3U8_DOWNLOAD Settings</summary>
```json
{
"M3U8_DOWNLOAD": {
"tqdm_delay": 0.01,
"default_video_workser": 12,
"default_audio_workser": 12,
"segment_timeout": 8,
"download_audio": true,
"merge_audio": true,
"specific_list_audio": [
"ita"
],
"download_subtitle": true,
"merge_subs": true,
"specific_list_subtitles": [
"ita",
"eng"
],
"cleanup_tmp_folder": true
}
}
```
#### Performance Settings
- `tqdm_delay`: Delay between progress bar updates
- `default_video_workser`: Number of threads for video download
* Can be changed with `--default_video_worker <number>`
- `default_audio_workser`: Number of threads for audio download
* Can be changed with `--default_audio_worker <number>`
- `segment_timeout`: Timeout for downloading individual segments
#### Audio Settings
- `download_audio`: Whether to download audio tracks
- `merge_audio`: Whether to merge audio with video
- `specific_list_audio`: List of audio languages to download
* Can be changed with `--specific_list_audio ita,eng`
#### Subtitle Settings
- `download_subtitle`: Whether to download subtitles
- `merge_subs`: Whether to merge subtitles with video
- `specific_list_subtitles`: List of subtitle languages to download
* Can be changed with `--specific_list_subtitles ita,eng`
#### Cleanup
- `cleanup_tmp_folder`: Remove temporary .ts files after download
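For example, to download with 8 video threads and only Italian and English audio (combining the flags listed above with the entry point from the terminal-usage examples below):
```bash
python test_run.py --default_video_worker 8 --specific_list_audio ita,eng
```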
</details>
<details>
<summary>🌍 Available Language Codes</summary>
| European | Asian | Middle Eastern | Others |
|-----------------|-----------------|-----------------|-----------------|
| ita - Italian | chi - Chinese | ara - Arabic | eng - English |
| spa - Spanish | jpn - Japanese | heb - Hebrew | por - Portuguese|
| fre - French | kor - Korean | tur - Turkish | fil - Filipino |
| ger - German | hin - Hindi | | ind - Indonesian|
| rus - Russian | mal - Malayalam | | may - Malay |
| swe - Swedish | tam - Tamil | | vie - Vietnamese|
| pol - Polish | tel - Telugu | | |
| ukr - Ukrainian | tha - Thai | | |
</details>
<details>
<summary>🎥 M3U8_CONVERSION Settings</summary>
```json
{
"M3U8_CONVERSION": {
"use_codec": false,
"use_vcodec": true,
"use_acodec": true,
"use_bitrate": true,
"use_gpu": false,
"default_preset": "ultrafast"
}
}
```
#### Basic Settings
- `use_codec`: Use specific codec settings
- `use_vcodec`: Use specific video codec
- `use_acodec`: Use specific audio codec
- `use_bitrate`: Apply bitrate settings
- `use_gpu`: Enable GPU acceleration (if available)
- `default_preset`: FFmpeg encoding preset
#### Encoding Presets
The `default_preset` configuration can be set to:
- `ultrafast`: Extremely fast conversion but larger file size
- `superfast`: Very fast with good quality/size ratio
- `veryfast`: Fast with good compression
- `faster`: Optimal balance for most users
- `fast`: Good compression, moderate time
- `medium`: FFmpeg default setting
- `slow`: High quality, slower process
- `slower`: Very high quality, slow process
- `veryslow`: Maximum quality, very slow process
#### GPU Acceleration
When `use_gpu` is enabled, the following hardware encoders are supported:
- NVIDIA: NVENC
- AMD: AMF
- Intel: QSV
Note: Requires updated drivers and FFmpeg with hardware acceleration support.
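For reference, this is roughly what hardware-accelerated encoding looks like in plain FFmpeg; an illustrative command for the NVENC path, not the exact invocation the downloader builds:
```bash
# H.264 encode on an NVIDIA GPU; swap h264_nvenc for h264_amf (AMD) or h264_qsv (Intel)
ffmpeg -hwaccel cuda -i input.mp4 -c:v h264_nvenc -preset fast output.mp4
```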
</details>
<details>
<summary>🔍 M3U8_PARSER Settings</summary>
```json
{
"M3U8_PARSER": {
"force_resolution": "Best",
"get_only_link": false
}
}
```
#### Resolution Options
- `force_resolution`: Choose video resolution:
* `"Best"`: Highest available resolution
* `"Worst"`: Lowest available resolution
* `"720p"`: Force 720p resolution
* Specific resolutions:
- 1080p (1920x1080)
- 720p (1280x720)
- 480p (640x480)
- 360p (640x360)
- 320p (480x320)
- 240p (426x240)
- 240p (320x240)
- 144p (256x144)
#### Link Options
- `get_only_link`: Return M3U8 playlist/index URL instead of downloading
</details>
# Global Search
<details>
<summary>🔍 Feature Overview</summary>
You can now search across multiple streaming sites at once using the Global Search feature. This allows you to find content more efficiently without having to search each site individually.
</details>
## Using Global Search
The Global Search feature provides a unified interface to search across all supported sites at once.
## Search Options
<details>
<summary>🎯 Search Options</summary>
When using Global Search, you have three ways to select which sites to search:
1. **Search all sites** - Searches across all available streaming sites
2. **Search by category** - Group sites by their categories (movies, series, anime, etc.)
3. **Select specific sites** - Choose individual sites to include in your search
</details>
## Navigation and Selection
<details>
<summary>📝 Navigation and Selection</summary>
After performing a search:
@ -673,13 +672,16 @@ After performing a search:
2. Select an item by number to view details or download
3. The system will automatically use the appropriate site's API to handle the download
</details>
## Command Line Arguments
<details>
<summary>⌨️ Command Line Arguments</summary>
The Global Search can be configured from the command line:
- `--global` - Perform a global search across multiple sites.
- `-s`, `--search` - Specify the search terms.
</details>
# Examples of terminal usage
@ -699,25 +701,32 @@ python test_run.py --global -s "cars"
# Docker
You can run the script in a Docker container.
<details>
<summary>🐳 Basic Setup</summary>
Build the image:
```
docker build -t streaming-community-api .
```
Run the container with Cloudflare DNS for better connectivity:
```
docker run -it --dns 1.1.1.1 -p 8000:8000 streaming-community-api
```
</details>
<details>
<summary>💾 Custom Storage Location</summary>
By default the videos will be saved in `/app/Video` inside the container. To save them on your machine:
```
docker run -it --dns 9.9.9.9 -p 8000:8000 -v /path/to/download:/app/Video streaming-community-api
```
</details>
### Docker quick setup with Make
<details>
<summary>🛠️ Quick Setup with Make</summary>
The Makefile (requires `make`) already provides two commands to build and run the container:
@ -729,10 +738,12 @@ make LOCAL_DIR=/path/to/download run-container
```
The `run-container` command also mounts the `config.json` file, so any change to the configuration is reflected immediately without rebuilding the image.
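If you prefer not to use Make, an equivalent manual invocation would be the following (assuming the image tag from the build step and that the container reads its configuration from `/app/config.json`, which is an assumption here):
```
docker run -it -p 8000:8000 \
  -v /path/to/download:/app/Video \
  -v $(pwd)/config.json:/app/config.json \
  streaming-community-api
```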
</details>
# Telegram Usage
## Configuration
<details>
<summary>⚙️ Basic Configuration</summary>
The bot was created to replace terminal commands and allow interaction via Telegram. Each download runs within a screen session, enabling multiple downloads to run simultaneously.
@ -761,20 +772,21 @@ TOKEN_TELEGRAM=IlTuo2131TOKEN$12D3Telegram
AUTHORIZED_USER_ID=12345678
DEBUG=False
```
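Because each download runs in its own `screen` session, you can inspect or attach to the sessions from the shell with the standard commands:
```bash
screen -ls        # list active sessions
screen -r <name>  # attach to a session; detach again with Ctrl+A, D
```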
</details>
## Install Python Dependencies
<details>
<summary>📥 Dependencies & Launch</summary>
Install dependencies:
```bash
pip install -r requirements.txt
```
Start the bot (from /StreamingCommunity/TelegramHelp):
```bash
python3 telegram_bot.py
```
</details>
# Tutorials
@ -788,19 +800,6 @@ python3 telegram_bot.py
- To finish: [website API](https://github.com/Arrowar/StreamingCommunity/tree/test_gui_1)
- To finish: [website API 2](https://github.com/hydrosh/StreamingCommunity/tree/test_gui_1)
# Contributing
Contributions are welcome! Steps:
1. Fork the repository
2. Create feature branch (`git checkout -b feature/AmazingFeature`)
3. Commit changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to branch (`git push origin feature/AmazingFeature`)
5. Open Pull Request
# Disclaimer
This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.
## Useful Projects
### 🎯 [Unit3Dup](https://github.com/31December99/Unit3Dup)
@ -816,8 +815,6 @@ Unofficial API for accessing content from the Italian site StreamingCommunity
### 🎥 [stream-buddy](https://github.com/Bbalduzz/stream-buddy)
Tool to watch or download films from the StreamingCommunity platform.
## Contributors
<a href="https://github.com/Arrowar/StreamingCommunity/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Arrowar/StreamingCommunity&max=1000&columns=10" alt="Contributors" />
</a>

View File

@ -0,0 +1,141 @@
# 05.07.24
# NOTE: NOT USED
import re
import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
# Variable
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
class VideoSource:
def __init__(self, url: str):
"""
Sets up the video source with the provided URL.
Parameters:
- url (str): The URL of the video.
"""
self.url = url
self.redirect_url = None
self.maxstream_url = None
self.m3u8_url = None
self.headers = {'user-agent': get_userAgent()}
def get_redirect_url(self):
"""
Sends a request to the initial URL and extracts the redirect URL.
"""
try:
response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
# Extract the redirect URL from the HTML
soup = BeautifulSoup(response.text, "html.parser")
self.redirect_url = soup.find("div", id="iframen1").get("data-src")
logging.info(f"Redirect URL: {self.redirect_url}")
return self.redirect_url
except Exception as e:
logging.error(f"Error parsing HTML: {e}")
raise
def get_maxstream_url(self):
"""
Sends a request to the redirect URL and extracts the Maxstream URL.
"""
try:
response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
# Extract the Maxstream URL from the HTML
soup = BeautifulSoup(response.text, "html.parser")
maxstream_url = soup.find("a")
if maxstream_url is None:
# If no anchor tag is found, try the alternative method
logging.warning("Anchor tag not found. Trying the alternative method.")
headers = {
'origin': 'https://stayonline.pro',
'user-agent': get_userAgent(),
'x-requested-with': 'XMLHttpRequest',
}
# Make request to stayonline api
data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data)
response.raise_for_status()
uprot_url = response.json()['data']['value']
# Retry getting the Maxstream URL
response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
maxstream_url = soup.find("a").get("href")
else:
maxstream_url = maxstream_url.get("href")
self.maxstream_url = maxstream_url
logging.info(f"Maxstream URL: {self.maxstream_url}")
return self.maxstream_url
except Exception as e:
logging.error(f"Error during the request: {e}")
raise
def get_m3u8_url(self):
"""
Sends a request to the Maxstream URL and extracts the .m3u8 file URL.
"""
try:
response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
# Iterate over all script tags in the HTML
for script in soup.find_all("script"):
if "eval(function(p,a,c,k,e,d)" in script.text:
# Beautify the packed JS to expose the embedded source URL
data_js = jsbeautifier.beautify(script.text)
# Extract the .m3u8 URL from the script's output
match = re.search(r'sources:\s*\[\{\s*src:\s*"([^"]+)"', data_js)
if match:
self.m3u8_url = match.group(1)
logging.info(f"M3U8 URL: {self.m3u8_url}")
break
else:
logging.error("Failed to find M3U8 URL: No match found")
return self.m3u8_url
except Exception as e:
logging.error(f"Error executing the Node.js script: {e}")
raise
def get_playlist(self):
"""
Executes the entire flow to obtain the final .m3u8 file URL.
"""
self.get_redirect_url()
self.get_maxstream_url()
return self.get_m3u8_url()

View File

@ -5,9 +5,9 @@ import logging
# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup
from curl_cffi import requests
# Internal utilities
@ -28,7 +28,6 @@ class VideoSource:
- url (str): The URL of the video source.
"""
self.headers = get_headers()
self.client = httpx.Client()
self.url = url
def make_request(self, url: str) -> str:
@ -42,8 +41,10 @@ class VideoSource:
- str: The response content if successful, None otherwise.
"""
try:
response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
response.raise_for_status()
response = requests.get(url, headers=self.headers, timeout=MAX_TIMEOUT, impersonate="chrome110")
if response.status_code >= 400:
logging.error(f"Request failed with status code: {response.status_code}")
return None
return response.text
except Exception as e:

View File

@ -1,6 +1,6 @@
# 01.03.24
import sys
import time
import logging
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
@ -24,7 +24,7 @@ console = Console()
class VideoSource:
def __init__(self, url: str, is_series: bool, media_id: int = None):
def __init__(self, url: str, is_series: bool, media_id: int = None, proxy: str = None):
"""
Initialize video source for streaming site.
@ -35,9 +35,11 @@ class VideoSource:
"""
self.headers = {'user-agent': get_userAgent()}
self.url = url
self.proxy = proxy
self.is_series = is_series
self.media_id = media_id
self.iframe_src = None
self.window_parameter = None
def get_iframe(self, episode_id: int) -> None:
"""
@ -55,7 +57,7 @@ class VideoSource:
}
try:
response = httpx.get(f"{self.url}/iframe/{self.media_id}", params=params, timeout=MAX_TIMEOUT)
response = httpx.get(f"{self.url}/iframe/{self.media_id}", headers=self.headers, params=params, timeout=MAX_TIMEOUT, proxy=self.proxy)
response.raise_for_status()
# Parse response with BeautifulSoup to get iframe source
@ -81,6 +83,7 @@ class VideoSource:
self.window_video = WindowVideo(converter.get('video'))
self.window_streams = StreamsCollection(converter.get('streams'))
self.window_parameter = WindowParameter(converter.get('masterPlaylist'))
time.sleep(0.5)
except Exception as e:
logging.error(f"Error parsing script: {e}")
@ -107,41 +110,45 @@ class VideoSource:
# Parse script to get video information
self.parse_script(script_text=script)
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
console.print("[yellow]This content will be available soon![/yellow]")
return
logging.error(f"Error getting content: {e}")
raise
except Exception as e:
logging.error(f"Error getting content: {e}")
raise
def get_playlist(self) -> str:
def get_playlist(self) -> str | None:
"""
Generate authenticated playlist URL.
Returns:
str: Fully constructed playlist URL with authentication parameters
str | None: Fully constructed playlist URL with authentication parameters, or None if content unavailable
"""
if not self.window_parameter:
return None
params = {}
# Add 'h' parameter if video quality is 1080p
if self.canPlayFHD:
params['h'] = 1
# Parse the original URL
parsed_url = urlparse(self.window_parameter.url)
query_params = parse_qs(parsed_url.query)
# Check specifically for 'b=1' in the query parameters
if 'b' in query_params and query_params['b'] == ['1']:
params['b'] = 1
# Add authentication parameters (token and expiration)
params.update({
"token": self.window_parameter.token,
"expires": self.window_parameter.expires
})
# Build the updated query string
query_string = urlencode(params)
# Construct the new URL with updated query parameters
return urlunparse(parsed_url._replace(query=query_string))

View File

@ -21,9 +21,10 @@ from .title import download_title
# Variable
indice = 3
_useFor = "film_serie"
_useFor = "Torrent"
_priority = 0
_engineDownload = "tor"
_engineDownload = "Torrent"
_deprecate = True
console = Console()
msg = Prompt()
@ -61,7 +62,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
download_title(select_title)
else:

View File

@ -24,9 +24,10 @@ from .series import download_series
# Variable
indice = 2
_useFor = "film_serie"
_useFor = "Film_&_Serie"
_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
@ -109,7 +110,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
bot = get_bot_instance()
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@ -1,6 +1,7 @@
# 16.03.25
import os
import re
# External library
@ -56,51 +57,38 @@ def download_film(select_title: MediaItem) -> str:
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
# Extract mostraguarda link
# Extract mostraguarda URL
try:
response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
response.raise_for_status()
except Exception as e:
console.print(f"[red]Error fetching the page: {e}")
if site_constant.TELEGRAM_BOT:
bot.send_message(f"ERRORE\n\nErrore durante il recupero della pagina.\n\n{e}", None)
return None
soup = BeautifulSoup(response.text, 'html.parser')
iframes = soup.find_all('iframe')
mostraguarda = iframes[0]['src']
# Create mostraguarda url
soup = BeautifulSoup(response.text, "html.parser")
iframe_tag = soup.find_all("iframe")
url_mostraGuarda = iframe_tag[0].get('data-src')
if not url_mostraGuarda:
console.print("Error: data-src attribute not found in iframe.")
if site_constant.TELEGRAM_BOT:
bot.send_message(f"ERRORE\n\nErrore: attributo data-src non trovato nell'iframe", None)
except Exception as e:
console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda")
return None
# Extract supervideo URL
supervideo_url = None
try:
response = httpx.get(url_mostraGuarda, headers=get_headers(), timeout=10)
response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+'
supervideo_match = re.search(pattern, response.text)
supervideo_url = 'https:' + supervideo_match.group(0)
except Exception as e:
console.print(f"[red]Error fetching mostraguarda link: {e}")
console.print("[yellow]Missing access credentials. This part of the code is still under development.")
if site_constant.TELEGRAM_BOT:
bot.send_message(f"ERRORE\n\nErrore durante il recupero del link mostra/guarda.\n\n{e}", None)
bot.send_message(f"ERRORE\n\nCredenziali di accesso mancanti.\nQuesta parte del codice è ancora in fase di sviluppo.", None)
console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL")
console.print("[yellow]This content will be available soon![/yellow]")
return None
# Create supervideo URL
soup = BeautifulSoup(response.text, "html.parser")
player_links = soup.find("ul", class_="_player-mirrors")
player_items = player_links.find_all("li")
supervideo_url = "https:" + player_items[0].get("data-link")
if not supervideo_url:
return None
# Init class
video_source = VideoSource(url=supervideo_url)
video_source = VideoSource(supervideo_url)
master_playlist = video_source.get_playlist()
# Define the filename and path for the downloaded film

View File

@ -38,38 +38,52 @@ class GetSerieInfo:
soup = BeautifulSoup(response.text, "html.parser")
self.series_name = soup.find("title").get_text(strip=True).split(" - ")[0]
# Process all seasons
season_items = soup.find_all('div', class_='accordion-item')
for season_idx, season_item in enumerate(season_items, 1):
season_header = season_item.find('div', class_='accordion-header')
if not season_header:
continue
season_name = season_header.get_text(strip=True)
# Find all season dropdowns
seasons_dropdown = soup.find('div', class_='dropdown seasons')
if not seasons_dropdown:
return
# Get all season items
season_items = seasons_dropdown.find_all('span', {'data-season': True})
for season_item in season_items:
season_num = int(season_item['data-season'])
season_name = season_item.get_text(strip=True)
# Create a new season and get a reference to it
# Create a new season
current_season = self.seasons_manager.add_season({
'number': season_idx,
'number': season_num,
'name': season_name
})
# Find episodes for this season
episode_divs = season_item.find_all('div', class_='down-episode')
for ep_idx, ep_div in enumerate(episode_divs, 1):
episode_name_tag = ep_div.find('b')
if not episode_name_tag:
# Find all episodes for this season
episodes_container = soup.find('div', {'class': 'dropdown mirrors', 'data-season': str(season_num)})
if not episodes_container:
continue
# Get all episode mirrors for this season
episode_mirrors = soup.find_all('div', {'class': 'dropdown mirrors',
'data-season': str(season_num)})
for mirror in episode_mirrors:
episode_data = mirror.get('data-episode', '').split('-')
if len(episode_data) != 2:
continue
episode_name = episode_name_tag.get_text(strip=True)
link_tag = ep_div.find('a', string=lambda text: text and "Supervideo" in text)
episode_url = link_tag['href'] if link_tag else None
ep_num = int(episode_data[1])
# Find supervideo link
supervideo_span = mirror.find('span', {'data-id': 'supervideo'})
if not supervideo_span:
continue
episode_url = supervideo_span.get('data-link', '')
# Add episode to the season
if current_season:
current_season.episodes.add({
'number': ep_idx,
'name': episode_name,
'number': ep_num,
'name': f"Episodio {ep_num}",
'url': episode_url
})

View File

@ -24,9 +24,10 @@ from .serie import download_series
# Variable
indice = 1
_useFor = "anime"
_useFor = "Anime"
_priority = 0
_engineDownload = "mp4"
_deprecate = False
msg = Prompt()
console = Console()
@ -108,7 +109,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
bot = get_bot_instance()
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@ -43,40 +43,38 @@ class ScrapeSerieAnime:
def get_count_episodes(self):
"""
Retrieve total number of episodes for the selected media.
This includes partial episodes (like episode 6.5).
Returns:
int: Total episode count
int: Total episode count including partial episodes
"""
try:
response = httpx.get(
url=f"{self.url}/info_api/{self.media_id}/",
headers=self.headers,
timeout=max_timeout
)
response.raise_for_status()
# Parse JSON response and return episode count
return response.json()["episodes_count"]
except Exception as e:
logging.error(f"Error fetching episode count: {e}")
return None
if self.episodes_cache is None:
self._fetch_all_episodes()
if self.episodes_cache:
return len(self.episodes_cache)
return None
def _fetch_all_episodes(self):
"""
Fetch all episodes data at once and cache it
"""
try:
all_episodes = []
count = self.get_count_episodes()
if not count:
return
# Get initial episode count
response = httpx.get(
url=f"{self.url}/info_api/{self.media_id}/",
headers=self.headers,
timeout=max_timeout
)
response.raise_for_status()
initial_count = response.json()["episodes_count"]
# Fetch episodes
all_episodes = []
start_range = 1
while start_range <= count:
end_range = min(start_range + 119, count)
# Fetch episodes in chunks
while start_range <= initial_count:
end_range = min(start_range + 119, initial_count)
response = httpx.get(
url=f"{self.url}/info_api/{self.media_id}/1",

View File

@ -18,10 +18,11 @@ from .film import download_film
# Variable
indice = 8
_useFor = "anime"
indice = 6
_useFor = "Anime"
_priority = 0
_engineDownload = "mp4"
_deprecate = False
msg = Prompt()
console = Console()
@ -74,7 +75,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@ -31,7 +31,8 @@ class ScrapSerie:
self.client = httpx.Client(
cookies={"sessionId": self.session_id},
headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
base_url=full_url
base_url=full_url,
verify=False
)
try:

View File

@ -1,6 +1,5 @@
# 09.06.24
import logging
from urllib.parse import quote_plus
@ -17,14 +16,15 @@ from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Logic class
from .site import title_search, media_search_manager, table_show_manager
from .series import download_thread
from .film import download_film
# Variable
indice = 6
_useFor = "serie"
indice = -1
_useFor = "Film"
_priority = 0
_engineDownload = "mp4"
_deprecate = True
msg = Prompt()
console = Console()
@ -34,10 +34,8 @@ def process_search_result(select_title):
"""
Handles the search result and initiates the download for either a film or series.
"""
if "Serie TV" in str(select_title.type):
download_thread(select_title)
else:
logging.error(f"Not supported: {select_title.type}")
# !!! TODO: add type handling; this does not work for series yet
download_film(select_title)
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None):
"""
@ -55,20 +53,20 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
if string_to_search is None:
string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{site_constant.SITE_NAME}").strip()
# Search on database
len_database = title_search(quote_plus(string_to_search))
# If only the database is needed, return the manager
## If only the database is needed, return the manager
if get_onlyDatabase:
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager,len_database)
process_search_result(select_title)
else:
else:
# If no results are found, ask again
console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
search()

View File

@ -0,0 +1,62 @@
# 03.07.24
import os
# External library
from rich.console import Console
# Internal utilities
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Lib.Downloader import HLS_Downloader
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Player
from StreamingCommunity.Api.Player.maxstream import VideoSource
# Variable
console = Console()
def download_film(select_title: MediaItem) -> str:
"""
Downloads a film using the provided obj.
Parameters:
- select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
Return:
- str: output path
"""
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
# Set up API manager
video_source = VideoSource(select_title.url)
# Define output path
title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
# Get m3u8 master playlist
master_playlist = video_source.get_playlist()
# Download the film using the m3u8 playlist, and output filename
r_proc = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, title_name)
).start()
if r_proc['error'] is not None:
try: os.remove(r_proc['path'])
except: pass
return r_proc['path']

View File

@ -1,7 +1,4 @@
# 09.06.24
import logging
# 03.07.24
# External libraries
import httpx
@ -40,7 +37,7 @@ def title_search(query: str) -> int:
media_search_manager.clear()
table_show_manager.clear()
search_url = f"{site_constant.FULL_URL}/search/?&q={query}&quick=1&type=videobox_video&nodes=11"
search_url = f"{site_constant.FULL_URL}/?s={query}"
console.print(f"[cyan]Search url: [yellow]{search_url}")
try:
@ -48,40 +45,34 @@ def title_search(query: str) -> int:
search_url,
headers={'user-agent': get_userAgent()},
timeout=max_timeout,
follow_redirects=True
follow_redirects=True,
verify=False
)
response.raise_for_status()
except Exception as e:
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
return 0
# Create soup and find table
soup = BeautifulSoup(response.text, "html.parser")
table_content = soup.find('ol', class_="ipsStream")
if table_content:
for title_div in table_content.find_all('li', class_='ipsStreamItem'):
try:
for card in soup.find_all("div", class_=["card", "mp-post", "horizontal"]):
try:
title_tag = card.find("h3", class_="card-title").find("a")
url = title_tag.get("href")
title = title_tag.get_text(strip=True)
title_type = title_div.find("p", class_="ipsType_reset").find_all("a")[-1].get_text(strip=True)
name = title_div.find("span", class_="ipsContained").find("a").get_text(strip=True)
link = title_div.find("span", class_="ipsContained").find("a").get("href")
title_info = {
'name': title,
'url': url,
'type': 'film'
}
title_info = {
'name': name,
'url': link,
'type': title_type,
'image': title_div.find("div", class_="ipsColumn").find("img").get("src")
}
media_search_manager.add_media(title_info)
media_search_manager.add_media(title_info)
except Exception as e:
print(f"Error parsing a film entry: {e}")
except Exception as e:
print(f"Error parsing a film entry: {e}")
return media_search_manager.get_length()
else:
logging.error("No table content found.")
return -999
# Return the number of titles found
return media_search_manager.get_length()

View File

@ -1,118 +0,0 @@
# 13.06.24
import os
from urllib.parse import urlparse
from typing import Tuple
# External library
from rich.console import Console
# Internal utilities
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Lib.Downloader import MP4_downloader
# Logic class
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
from StreamingCommunity.Api.Template.Util import (
manage_selection,
map_episode_title,
validate_episode_selection,
display_episodes_list
)
from StreamingCommunity.Api.Template.config_loader import site_constant
# Player
from .util.ScrapeSerie import GetSerieInfo
from StreamingCommunity.Api.Player.ddl import VideoSource
# Variable
console = Console()
def download_video(index_episode_selected: int, scape_info_serie: GetSerieInfo) -> Tuple[str,bool]:
"""
Downloads a specific episode.
Parameters:
- index_episode_selected (int): Episode index
- scape_info_serie (GetSerieInfo): Scraper object with series information
Returns:
- str: Path to downloaded file
- bool: Whether download was stopped
"""
start_message()
# Get episode information
obj_episode = scape_info_serie.selectEpisode(1, index_episode_selected-1)
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.get('name')}[/bold magenta] ([cyan]E{index_episode_selected}[/cyan]) \n")
# Define filename and path for the downloaded video
title_name = os_manager.get_sanitize_file(
f"{map_episode_title(scape_info_serie.tv_name, None, index_episode_selected, obj_episode.get('name'))}.mp4"
)
mp4_path = os.path.join(site_constant.SERIES_FOLDER, scape_info_serie.tv_name)
# Create output folder
os_manager.create_path(mp4_path)
# Setup video source
video_source = VideoSource(site_constant.COOKIE, obj_episode.get('url'))
# Get m3u8 master playlist
master_playlist = video_source.get_playlist()
# Parse start page url
parsed_url = urlparse(obj_episode.get('url'))
# Start download
r_proc = MP4_downloader(
url=master_playlist,
path=os.path.join(mp4_path, title_name),
referer=f"{parsed_url.scheme}://{parsed_url.netloc}/",
)
if r_proc != None:
console.print("[green]Result: ")
console.print(r_proc)
return os.path.join(mp4_path, title_name), False
def download_thread(dict_serie: MediaItem, episode_selection: str = None):
"""
Download all episode of a thread
Parameters:
dict_serie (MediaItem): The selected media item
episode_selection (str, optional): Episode selection input that bypasses manual input
"""
scrape_serie = GetSerieInfo(dict_serie, site_constant.COOKIE)
# Get episode list
episodes = scrape_serie.getEpisodeSeasons()
episodes_count = len(episodes)
# Display episodes list and manage user selection
if episode_selection is None:
last_command = display_episodes_list(scrape_serie.list_episodes)
else:
last_command = episode_selection
console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
# Validate episode selection
list_episode_select = manage_selection(last_command, episodes_count)
list_episode_select = validate_episode_selection(list_episode_select, episodes_count)
# Download selected episodes
kill_handler = bool(False)
for i_episode in list_episode_select:
if kill_handler:
break
kill_handler = download_video(i_episode, scrape_serie)[1]

View File

@ -1,112 +0,0 @@
# 13.06.24
import sys
import logging
from typing import List, Dict
# External libraries
import httpx
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
# Logic class
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")
class GetSerieInfo:
def __init__(self, dict_serie: MediaItem, cookies) -> None:
"""
Initializes the GetSerieInfo object with default values.
Parameters:
- dict_serie (MediaItem): Dictionary containing series information (optional).
"""
self.headers = {'user-agent': get_userAgent()}
self.cookies = cookies
self.url = dict_serie.url
self.tv_name = None
self.list_episodes = None
def get_episode_number(self) -> List[Dict[str, str]]:
"""
Retrieves the number of episodes for a specific season.
Parameters:
n_season (int): The season number.
Returns:
List[Dict[str, str]]: List of dictionaries containing episode information.
"""
try:
response = httpx.get(f"{self.url}?area=online", cookies=self.cookies, headers=self.headers, timeout=max_timeout)
response.raise_for_status()
except Exception as e:
logging.error(f"Insert value for [ips4_device_key, ips4_member_id, ips4_login_key] in config.json file SITE \\ ddlstreamitaly \\ cookie. Use browser debug and cookie request with a valid account, filter by DOC. Error: {e}")
sys.exit(0)
# Parse HTML content of the page
soup = BeautifulSoup(response.text, "html.parser")
# Get tv name
self.tv_name = soup.find("span", class_= "ipsType_break").get_text(strip=True)
# Find the container of episodes for the specified season
table_content = soup.find('div', class_='ipsMargin_bottom:half')
list_dict_episode = []
for episode_div in table_content.find_all('a', href=True):
# Get text of episode
part_name = episode_div.get_text(strip=True)
if part_name:
obj_episode = {
'name': part_name,
'url': episode_div['href']
}
list_dict_episode.append(obj_episode)
self.list_episodes = list_dict_episode
return list_dict_episode
# ------------- FOR GUI -------------
def getNumberSeason(self) -> int:
"""
Get the total number of seasons available for the series.
Note: DDLStreamItaly typically provides content organized as threads, not seasons.
"""
return 1
def getEpisodeSeasons(self, season_number: int = 1) -> list:
"""
Get all episodes for a specific season.
Note: For DDLStreamItaly, this returns all episodes as they're typically in one list.
"""
if not self.list_episodes:
self.list_episodes = self.get_episode_number()
return self.list_episodes
def selectEpisode(self, season_number: int = 1, episode_index: int = 0) -> dict:
"""
Get information for a specific episode.
"""
episodes = self.getEpisodeSeasons()
if not episodes or episode_index < 0 or episode_index >= len(episodes):
logging.error(f"Episode index {episode_index} is out of range")
return None
return episodes[episode_index]

View File

@ -20,10 +20,11 @@ from .series import download_series
# Variable
indice = 5
_useFor = "serie"
indice = 4
_useFor = "Serie"
_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
@ -74,7 +75,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@ -19,10 +19,11 @@ from .film import download_film
# Variable
indice = 8
_useFor = "film_serie"
_priority = 1 # NOTE: Site search need the use of tmbd obj
indice = 5
_useFor = "Film_&_Serie"
_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
@ -83,7 +84,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections)
else:

View File

@ -1,9 +1,5 @@
# 21.05.24
import threading
import queue
# External libraries
import httpx
from rich.console import Console
@ -13,12 +9,9 @@ from rich.console import Console
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
from StreamingCommunity.Lib.TMBD.tmdb import tmdb
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
from .util.ScrapeSerie import GetSerieInfo
# Variable
@ -26,76 +19,33 @@ console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
MAX_THREADS = 12
def determine_media_type(title):
def determine_media_type(item):
"""
Use TMDB to determine if a title is a movie or TV show.
Determine if the item is a film or TV series by checking actual seasons count
using GetSerieInfo.
"""
try:
# First search as a movie
movie_results = tmdb._make_request("search/movie", {"query": title})
movie_count = len(movie_results.get("results", []))
# Then search as a TV show
tv_results = tmdb._make_request("search/tv", {"query": title})
tv_count = len(tv_results.get("results", []))
# If results found in only one category, use that
if movie_count > 0 and tv_count == 0:
return "film"
elif tv_count > 0 and movie_count == 0:
return "tv"
# If both have results, compare popularity
if movie_count > 0 and tv_count > 0:
top_movie = movie_results["results"][0]
top_tv = tv_results["results"][0]
return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv"
# Extract program name from path_id
program_name = None
if item.get('path_id'):
parts = item['path_id'].strip('/').split('/')
if len(parts) >= 2:
program_name = parts[-1].split('.')[0]
return "film"
if not program_name:
return "film"
scraper = GetSerieInfo(program_name)
scraper.collect_info_title()
return "tv" if scraper.getNumberSeason() > 0 else "film"
except Exception as e:
console.log(f"Error determining media type with TMDB: {e}")
console.print(f"[red]Error determining media type: {e}[/red]")
return "film"
def worker_determine_type(work_queue, result_dict, worker_id):
"""
Worker function to process items from queue and determine media types.
Parameters:
- work_queue: Queue containing items to process
- result_dict: Dictionary to store results
- worker_id: ID of the worker thread
"""
while not work_queue.empty():
try:
index, item = work_queue.get(block=False)
title = item.get('titolo', '')
media_type = determine_media_type(title)
result_dict[index] = {
'id': item.get('id', ''),
'name': title,
'type': media_type,
'path_id': item.get('path_id', ''),
'url': f"https://www.raiplay.it{item.get('url', '')}",
'image': f"https://www.raiplay.it{item.get('immagine', '')}",
}
work_queue.task_done()
except queue.Empty:
break
except Exception as e:
console.log(f"Worker {worker_id} error: {e}")
work_queue.task_done()
def title_search(query: str) -> int:
"""
Search for titles based on a search query.
@ -141,33 +91,15 @@ def title_search(query: str) -> int:
data = response.json().get('agg').get('titoli').get('cards')
data = data[:15] if len(data) > 15 else data
# Use multithreading to determine media types in parallel
work_queue = queue.Queue()
result_dict = {}
# Add items to the work queue
for i, item in enumerate(data):
work_queue.put((i, item))
# Create and start worker threads
threads = []
for i in range(min(MAX_THREADS, len(data))):
thread = threading.Thread(
target=worker_determine_type,
args=(work_queue, result_dict, i),
daemon=True
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
# Add all results to media manager in correct order
for i in range(len(data)):
if i in result_dict:
media_search_manager.add_media(result_dict[i])
# Process each item and add to media manager
for item in data:
media_search_manager.add_media({
'id': item.get('id', ''),
'name': item.get('titolo', ''),
'type': determine_media_type(item),
'path_id': item.get('path_id', ''),
'url': f"https://www.raiplay.it{item.get('url', '')}",
'image': f"https://www.raiplay.it{item.get('immagine', '')}",
})
# Return the number of titles found
return media_search_manager.get_length()

View File

@ -30,28 +30,48 @@ class GetSerieInfo:
try:
program_url = f"{self.base_url}/programmi/{self.program_name}.json"
response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
# If 404, content is not yet available
if response.status_code == 404:
logging.info(f"Content not yet available: {self.program_name}")
return
response.raise_for_status()
json_data = response.json()
# Look for seasons in the 'blocks' property
for block in json_data.get('blocks'):
if block.get('type') == 'RaiPlay Multimedia Block' and block.get('name', '').lower() == 'episodi':
self.publishing_block_id = block.get('id')
# Extract seasons from sets array
for season_set in block.get('sets', []):
if 'stagione' in season_set.get('name', '').lower():
self.seasons_manager.add_season({
'id': season_set.get('id', ''),
'number': len(self.seasons_manager.seasons) + 1,
'name': season_set.get('name', ''),
'path': season_set.get('path_id', ''),
'episodes_count': season_set.get('episode_size', {}).get('number', 0)
})
for block in json_data.get('blocks', []):
except Exception as e:
# Check if block is a season block or episodi block
if block.get('type') == 'RaiPlay Multimedia Block':
if block.get('name', '').lower() == 'episodi':
self.publishing_block_id = block.get('id')
# Extract seasons from sets array
for season_set in block.get('sets', []):
if 'stagione' in season_set.get('name', '').lower():
self._add_season(season_set, block.get('id'))
elif 'stagione' in block.get('name', '').lower():
self.publishing_block_id = block.get('id')
# Extract season directly from block's sets
for season_set in block.get('sets', []):
self._add_season(season_set, block.get('id'))
except httpx.HTTPError as e:
logging.error(f"Error collecting series info: {e}")
except Exception as e:
logging.error(f"Unexpected error collecting series info: {e}")
def _add_season(self, season_set: dict, block_id: str):
self.seasons_manager.add_season({
'id': season_set.get('id', ''),
'number': len(self.seasons_manager.seasons) + 1,
'name': season_set.get('name', ''),
'path': season_set.get('path_id', ''),
'episodes_count': season_set.get('episode_size', {}).get('number', 0)
})
def collect_info_season(self, number_season: int) -> None:
"""Get episodes for a specific season."""

View File

@ -12,6 +12,7 @@ from rich.prompt import Prompt
# Internal utilities
from StreamingCommunity.Api.Template import get_select_title
from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
@ -25,47 +26,72 @@ from .series import download_series
# Variable
indice = 0
_useFor = "film_serie"
_useFor = "Film_&_Serie" # "Movies_&_Series"
_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
proxy = None
def get_user_input(string_to_search: str = None):
"""
Asks the user to input a search term.
Handles both Telegram bot input and direct input.
If string_to_search is provided, it's returned directly (after stripping).
"""
if string_to_search is None:
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
string_to_search = bot.ask(
"key_search",
f"Enter the search term\nor type 'back' to return to the menu: ",
None
)
if string_to_search is not None:
return string_to_search.strip()
if string_to_search == 'back':
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
user_response = bot.ask(
"key_search", # Request type
"Enter the search term\nor type 'back' to return to the menu: ",
None
)
if user_response is None:
bot.send_message("Timeout: No search term entered.", None)
return None
if user_response.lower() == 'back':
bot.send_message("Returning to the main menu...", None)
try:
# Restart the script
subprocess.Popen([sys.executable] + sys.argv)
sys.exit()
else:
string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
except Exception as e:
bot.send_message(f"Error during restart attempt: {e}", None)
return None # Return None if restart fails
return user_response.strip()
else:
return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
return string_to_search
def process_search_result(select_title, selections=None):
def process_search_result(select_title, selections=None, proxy=None):
"""
Handles the search result and initiates the download for either a film or series.
Parameters:
select_title (MediaItem): The selected media item
select_title (MediaItem): The selected media item. Can be None if selection fails.
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
{'season': season_selection, 'episode': episode_selection}
e.g., {'season': season_selection, 'episode': episode_selection}
proxy (str, optional): The proxy to use for downloads.
"""
if not select_title:
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
bot.send_message("No title selected or selection cancelled.", None)
else:
console.print("[yellow]No title selected or selection cancelled.")
return
if select_title.type == 'tv':
season_selection = None
episode_selection = None
@ -74,42 +100,67 @@ def process_search_result(select_title, selections=None):
season_selection = selections.get('season')
episode_selection = selections.get('episode')
download_series(select_title, season_selection, episode_selection)
download_series(select_title, season_selection, episode_selection, proxy)
else:
download_film(select_title)
download_film(select_title, proxy)
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
"""
Main function of the application for search.
Parameters:
string_to_search (str, optional): String to search for
get_onlyDatabase (bool, optional): If True, return only the database object
direct_item (dict, optional): Direct item to process (bypass search)
string_to_search (str, optional): String to search for. Can be passed from run.py.
If 'back', special handling might occur in get_user_input.
get_onlyDatabase (bool, optional): If True, return only the database search manager object.
direct_item (dict, optional): Direct item to process (bypasses search).
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
{'season': season_selection, 'episode': episode_selection}
for series (season/episode).
"""
bot = None
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
# Check proxy if not already set
finder = ProxyFinder(site_constant.FULL_URL)
proxy = finder.find_fast_proxy()
if direct_item:
select_title = MediaItem(**direct_item)
process_search_result(select_title, selections)
select_title_obj = MediaItem(**direct_item)
process_search_result(select_title_obj, selections, proxy)
return
actual_search_query = get_user_input(string_to_search)
# Handle cases where user input is empty, or 'back' was handled (sys.exit or None return)
if not actual_search_query:
if bot:
if actual_search_query is None: # Specifically for timeout from bot.ask or failed restart
bot.send_message("Search term not provided or operation cancelled. Returning.", None)
return
if string_to_search is None:
string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
# Perform search on the database using the obtained query
finder = ProxyFinder(site_constant.FULL_URL)
proxy = finder.find_fast_proxy()
len_database = title_search(actual_search_query, proxy)
# Search on database
len_database = title_search(string_to_search)
# If only the database is needed, return the manager
# If only the database object (media_search_manager populated by title_search) is needed
if get_onlyDatabase:
return media_search_manager
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
process_search_result(select_title, selections)
select_title = get_select_title(table_show_manager, media_search_manager, len_database)
process_search_result(select_title, selections, proxy)
else:
# If no results are found, ask again
console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
search()
no_results_message = f"No results found for: '{actual_search_query}'"
if bot:
bot.send_message(no_results_message, None)
else:
console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
# Do not call search() recursively here to avoid infinite loops on no results.
# The flow should return to the caller (e.g., main menu in run.py).
return

View File

@ -27,7 +27,7 @@ from StreamingCommunity.Api.Player.vixcloud import VideoSource
console = Console()
def download_film(select_title: MediaItem) -> str:
def download_film(select_title: MediaItem, proxy: str = None) -> str:
"""
Downloads a film using the provided film ID, title name, and domain.
@ -55,13 +55,17 @@ def download_film(select_title: MediaItem) -> str:
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
# Init class
video_source = VideoSource(site_constant.FULL_URL, False, select_title.id)
video_source = VideoSource(f"{site_constant.FULL_URL}/it", False, select_title.id, proxy)
# Retrieve scws and if available master playlist
video_source.get_iframe(select_title.id)
video_source.get_content()
master_playlist = video_source.get_playlist()
if master_playlist is None:
console.print(f"[red]Site: {site_constant.SITE_NAME}, error: No master playlist found[/red]")
return None
# Define the filename and path for the downloaded film
title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))

View File

@ -142,7 +142,7 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, vid
break
def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None, proxy = None) -> None:
"""
Handle downloading a complete series.
@ -154,8 +154,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
start_message()
# Init class
video_source = VideoSource(site_constant.FULL_URL, True, select_season.id)
scrape_serie = GetSerieInfo(site_constant.FULL_URL, select_season.id, select_season.slug)
video_source = VideoSource(f"{site_constant.FULL_URL}/it", True, select_season.id, proxy)
scrape_serie = GetSerieInfo(f"{site_constant.FULL_URL}/it", select_season.id, select_season.slug, proxy)
# Collect information about season
scrape_serie.getNumberSeason()
@ -219,4 +219,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# Get script_id
script_id = TelegramSession.get_session()
if script_id != "unknown":
TelegramSession.deleteScriptId(script_id)
TelegramSession.deleteScriptId(script_id)

View File

@ -28,7 +28,7 @@ table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
def title_search(query: str) -> int:
def title_search(query: str, proxy: str) -> int:
"""
Search for titles based on a search query.
@ -46,9 +46,10 @@ def title_search(query: str) -> int:
try:
response = httpx.get(
site_constant.FULL_URL,
f"{site_constant.FULL_URL}/it",
headers={'user-agent': get_userAgent()},
timeout=max_timeout
timeout=max_timeout,
proxy=proxy
)
response.raise_for_status()
@ -56,10 +57,11 @@ def title_search(query: str) -> int:
version = json.loads(soup.find('div', {'id': "app"}).get("data-page"))['version']
except Exception as e:
if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
console.print(f"[red]Site: {site_constant.SITE_NAME} version, request error: {e}")
return 0
search_url = f"{site_constant.FULL_URL}/search?q={query}"
search_url = f"{site_constant.FULL_URL}/it/search?q={query}"
console.print(f"[cyan]Search url: [yellow]{search_url}")
try:
@ -71,7 +73,8 @@ def title_search(query: str) -> int:
'x-inertia': 'true',
'x-inertia-version': version
},
timeout=max_timeout
timeout=max_timeout,
proxy=proxy
)
response.raise_for_status()
@ -117,4 +120,4 @@ def title_search(query: str) -> int:
bot.send_message(f"Lista dei risultati:", choices)
# Return the number of titles found
return media_search_manager.get_length()
return media_search_manager.get_length()

View File

@ -20,7 +20,7 @@ max_timeout = config_manager.get_int("REQUESTS", "timeout")
class GetSerieInfo:
def __init__(self, url, media_id: int = None, series_name: str = None):
def __init__(self, url, media_id: int = None, series_name: str = None, proxy = None):
"""
Initialize the GetSerieInfo class for scraping TV series information.
@ -32,6 +32,7 @@ class GetSerieInfo:
self.is_series = False
self.headers = {'user-agent': get_userAgent()}
self.url = url
self.proxy = proxy
self.media_id = media_id
self.seasons_manager = SeasonManager()
@ -50,7 +51,8 @@ class GetSerieInfo:
response = httpx.get(
url=f"{self.url}/titles/{self.media_id}-{self.series_name}",
headers=self.headers,
timeout=max_timeout
timeout=max_timeout,
proxy=self.proxy
)
response.raise_for_status()
@ -104,7 +106,8 @@ class GetSerieInfo:
'x-inertia': 'true',
'x-inertia-version': self.version,
},
timeout=max_timeout
timeout=max_timeout,
proxy=self.proxy
)
# Extract episodes from JSON response

View File

@@ -19,13 +19,15 @@ from .series import download_series
# Variable
indice = 8
_useFor = "film_serie"
_priority = 10 # !!! VERY SLOW
indice = 7
_useFor = "Film_&_Serie"
_priority = 0
_engineDownload = "hls"
_deprecate = False
msg = Prompt()
console = Console()
proxy = None
def get_user_input(string_to_search: str = None):
@@ -73,20 +75,25 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
select_title = MediaItem(**direct_item)
process_search_result(select_title, selections) # DONT SUPPORT PROXY FOR NOW
return
# Check proxy if not already set
finder = ProxyFinder(site_constant.FULL_URL)
proxy = finder.find_fast_proxy()
if string_to_search is None:
string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
# Perform search on the database using the obtained query
finder = ProxyFinder(url=f"{site_constant.FULL_URL}/serie/euphoria/")
proxy, response_serie, _ = finder.find_fast_proxy()
len_database = title_search(string_to_search, [proxy, response_serie])
proxy = finder.find_fast_proxy()
len_database = title_search(string_to_search, proxy)
# If only the database is needed, return the manager
if get_onlyDatabase:
return media_search_manager
if len_database > 0:
select_title = get_select_title(table_show_manager, media_search_manager)
select_title = get_select_title(table_show_manager, media_search_manager,len_database)
process_search_result(select_title, selections, proxy)
else:

View File

@@ -27,9 +27,16 @@ table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
def extract_nonce(response_) -> str:
def extract_nonce(proxy) -> str:
"""Extract nonce value from the page script"""
soup = BeautifulSoup(response_.content, 'html.parser')
response = httpx.get(
site_constant.FULL_URL,
headers={'user-agent': get_userAgent()},
timeout=max_timeout,
proxy=proxy
)
soup = BeautifulSoup(response.content, 'html.parser')
script = soup.find('script', id='live-search-js-extra')
if script:
match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script.text)
@@ -38,7 +45,7 @@ def extract_nonce(response_) -> str:
return ""
def title_search(query: str, additionalData: list) -> int:
def title_search(query: str, proxy: str) -> int:
"""
Search for titles based on a search query.
@@ -51,12 +58,11 @@ def title_search(query: str, additionalData: list) -> int:
media_search_manager.clear()
table_show_manager.clear()
proxy, response_serie = additionalData
search_url = f"{site_constant.FULL_URL}/wp-admin/admin-ajax.php"
console.print(f"[cyan]Search url: [yellow]{search_url}")
try:
_wpnonce = extract_nonce(response_serie)
_wpnonce = extract_nonce(proxy)
if not _wpnonce:
console.print("[red]Error: Failed to extract nonce")
@@ -82,6 +88,7 @@ def title_search(query: str, additionalData: list) -> int:
soup = BeautifulSoup(response.text, 'html.parser')
except Exception as e:
if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
return 0
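
extract_nonce() now takes the proxy and performs its own request instead of reusing the response the old ProxyFinder passed along. The underlying technique, pulling a WordPress admin-ajax nonce out of the inline live-search script, reduces to roughly this sketch:

import re
import httpx
from bs4 import BeautifulSoup

def extract_nonce(base_url, proxy=None, timeout=20):
    """Fetch the home page and pull the admin_ajax_nonce from the inline script."""
    response = httpx.get(base_url, timeout=timeout, proxy=proxy)
    soup = BeautifulSoup(response.content, 'html.parser')
    script = soup.find('script', id='live-search-js-extra')
    if script:
        match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script.text)
        if match:
            return match.group(1)
    return ""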

View File

@@ -7,78 +7,123 @@ import sys
from rich.console import Console
# Internal utilities
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
# Variable
console = Console()
available_colors = ['red', 'magenta', 'yellow', 'cyan', 'green', 'blue', 'white']
column_to_hide = ['Slug', 'Sub_ita', 'Last_air_date', 'Seasons_count', 'Url', 'Image', 'Path_id']
def get_select_title(table_show_manager, media_search_manager):
def get_select_title(table_show_manager, media_search_manager, num_results_available):
"""
Display a selection of titles and prompt the user to choose one.
Handles both console and Telegram bot input.
Parameters:
table_show_manager: Manager for console table display.
media_search_manager: Manager holding the list of media items.
num_results_available (int): The number of media items available for selection.
Returns:
MediaItem: The selected media item.
MediaItem: The selected media item, or None if no selection is made or an error occurs.
"""
# Determine column_info dynamically for (search site)
if not media_search_manager.media_list:
console.print("\n[red]No media items available.")
# console.print("\n[red]No media items available.")
return None
# Example of available colors for columns
available_colors = ['red', 'magenta', 'yellow', 'cyan', 'green', 'blue', 'white']
# Retrieve the keys of the first media item as column headers
first_media_item = media_search_manager.media_list[0]
column_info = {"Index": {'color': available_colors[0]}} # Always include Index with a fixed color
# Assign colors to the remaining keys dynamically
color_index = 1
for key in first_media_item.__dict__.keys():
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
prompt_message = f"Inserisci il numero del titolo che vuoi selezionare (da 0 a {num_results_available - 1}):"
user_input_str = bot.ask(
"select_title_from_list_number",
prompt_message,
None
)
if key.capitalize() in column_to_hide:
continue
if user_input_str is None:
bot.send_message("Timeout: nessuna selezione ricevuta.", None)
return None
if key in ('id', 'type', 'name', 'score'): # Custom prioritization of colors
if key == 'type':
column_info["Type"] = {'color': 'yellow'}
elif key == 'name':
column_info["Name"] = {'color': 'magenta'}
elif key == 'score':
column_info["Score"] = {'color': 'cyan'}
try:
chosen_index = int(user_input_str)
if 0 <= chosen_index < num_results_available:
selected_item = media_search_manager.get(chosen_index)
if selected_item:
return selected_item
else:
bot.send_message(f"Errore interno: Impossibile recuperare il titolo con indice {chosen_index}.", None)
return None
else:
bot.send_message(f"Selezione '{chosen_index}' non valida. Inserisci un numero compreso tra 0 e {num_results_available - 1}.", None)
return None
except ValueError:
bot.send_message(f"Input '{user_input_str}' non valido. Devi inserire un numero.", None)
return None
except Exception as e:
bot.send_message(f"Si è verificato un errore durante la selezione: {e}", None)
return None
else:
column_info[key.capitalize()] = {'color': available_colors[color_index % len(available_colors)]}
color_index += 1
table_show_manager.add_column(column_info)
# Populate the table with title information
for i, media in enumerate(media_search_manager.media_list):
media_dict = {'Index': str(i)}
else:
# Original console logic
if not media_search_manager.media_list:
console.print("\n[red]No media items available.")
return None
first_media_item = media_search_manager.media_list[0]
column_info = {"Index": {'color': available_colors[0]}}
color_index = 1
for key in first_media_item.__dict__.keys():
if key.capitalize() in column_to_hide:
continue
if key in ('id', 'type', 'name', 'score'):
if key == 'type': column_info["Type"] = {'color': 'yellow'}
elif key == 'name': column_info["Name"] = {'color': 'magenta'}
elif key == 'score': column_info["Score"] = {'color': 'cyan'}
else:
column_info[key.capitalize()] = {'color': available_colors[color_index % len(available_colors)]}
color_index += 1
# Ensure all values are strings for rich add table
media_dict[key.capitalize()] = str(getattr(media, key))
table_show_manager.clear()
table_show_manager.add_column(column_info)
table_show_manager.add_tv_show(media_dict)
for i, media in enumerate(media_search_manager.media_list):
media_dict = {'Index': str(i)}
for key in first_media_item.__dict__.keys():
if key.capitalize() in column_to_hide:
continue
media_dict[key.capitalize()] = str(getattr(media, key))
table_show_manager.add_tv_show(media_dict)
# Run the table and handle user input
last_command = table_show_manager.run(force_int_input=True, max_int_input=len(media_search_manager.media_list))
table_show_manager.clear()
last_command_str = table_show_manager.run(force_int_input=True, max_int_input=len(media_search_manager.media_list))
table_show_manager.clear()
# Handle user's quit command
if last_command == "q" or last_command == "quit":
console.print("\n[red]Quit ...")
sys.exit(0)
if last_command_str is None or last_command_str.lower() in ["q", "quit"]:
console.print("\n[red]Selezione annullata o uscita.")
return None
# Check if the selected index is within range
if 0 <= int(last_command) < len(media_search_manager.media_list):
return media_search_manager.get(int(last_command))
else:
console.print("\n[red]Wrong index")
sys.exit(0)
try:
selected_index = int(last_command_str)
if 0 <= selected_index < len(media_search_manager.media_list):
return media_search_manager.get(selected_index)
else:
console.print("\n[red]Indice errato o non valido.")
# sys.exit(0)
return None
except ValueError:
console.print("\n[red]Input non numerico ricevuto dalla tabella.")
# sys.exit(0)
return None
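
Both the Telegram and the console branches now validate the raw selection and return None instead of calling sys.exit() on bad input. The shared validation reduces to a small helper (parse_selection is an illustrative name, not part of the change):

def parse_selection(raw_input, num_items):
    # Returns a valid zero-based index, or None for non-numeric or out-of-range input
    try:
        index = int(raw_input)
    except (TypeError, ValueError):
        return None
    return index if 0 <= index < num_items else None

assert parse_selection("3", 10) == 3 and parse_selection("x", 10) is None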

View File

@@ -1,20 +1,15 @@
# 29.04.25
import os
import sys
import time
import json
import signal
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, as_completed
# External library
import httpx
from rich import print
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeRemainingColumn
# Internal utilities
@@ -27,118 +22,18 @@ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
class ProxyFinder:
def __init__(self, url, timeout_threshold: float = 7.0, max_proxies: int = 150, max_workers: int = 12):
def __init__(self, url, timeout_threshold: float = 7.0):
self.url = url
self.timeout_threshold = timeout_threshold
self.max_proxies = max_proxies
self.max_workers = max_workers
self.found_proxy = None
self.shutdown_flag = False
self.json_file = os.path.join(os.path.dirname(__file__), 'working_proxies.json')
signal.signal(signal.SIGINT, self._handle_interrupt)
def load_saved_proxies(self) -> tuple:
"""Load saved proxies if they're not expired (2 hours old)"""
try:
if not os.path.exists(self.json_file):
return None, None
with open(self.json_file, 'r') as f:
data = json.load(f)
if not data.get('proxies') or not data.get('last_update'):
return None, None
last_update = datetime.fromisoformat(data['last_update'])
if datetime.now() - last_update > timedelta(hours=2):
return None, None
return data['proxies'], last_update
except Exception:
return None, None
def save_working_proxy(self, proxy: str, response_time: float):
"""Save working proxy to JSON file"""
data = {
'proxies': [{'proxy': proxy, 'response_time': response_time}],
'last_update': datetime.now().isoformat()
}
try:
with open(self.json_file, 'w') as f:
json.dump(data, f, indent=4)
except Exception as e:
print(f"[bold red]Error saving proxy:[/bold red] {str(e)}")
def fetch_geonode(self) -> list:
proxies = []
try:
response = httpx.get(
"https://proxylist.geonode.com/api/proxy-list?protocols=http%2Chttps&limit=100&page=1&sort_by=speed&sort_type=asc",
headers=get_headers(),
timeout=MAX_TIMEOUT
)
data = response.json()
proxies = [(f"http://{p['ip']}:{p['port']}", "Geonode") for p in data.get('data', [])]
except Exception as e:
print(f"[bold red]Error in Geonode:[/bold red] {str(e)[:100]}")
return proxies
def fetch_proxyscrape(self) -> list:
proxies = []
try:
response = httpx.get(
"https://api.proxyscrape.com/v4/free-proxy-list/get?request=get_proxies&protocol=http&skip=0&proxy_format=protocolipport&format=json&limit=100&timeout=1000",
headers=get_headers(),
timeout=MAX_TIMEOUT
)
data = response.json()
if 'proxies' in data and isinstance(data['proxies'], list):
proxies = [(proxy_data['proxy'], "ProxyScrape") for proxy_data in data['proxies'] if 'proxy' in proxy_data]
except Exception as e:
print(f"[bold red]Error in ProxyScrape:[/bold red] {str(e)[:100]}")
return proxies
def fetch_proxies_from_sources(self) -> list:
#print("[cyan]Fetching proxies from sources...[/cyan]")
with ThreadPoolExecutor(max_workers=3) as executor:
proxyscrape_future = executor.submit(self.fetch_proxyscrape)
geonode_future = executor.submit(self.fetch_geonode)
sources_proxies = {}
try:
proxyscrape_result = proxyscrape_future.result()
sources_proxies["proxyscrape"] = proxyscrape_result[:int(self.max_proxies/2)]
except Exception as e:
print(f"[bold red]Error fetching from proxyscrape:[/bold red] {str(e)[:100]}")
sources_proxies["proxyscrape"] = []
try:
geonode_result = geonode_future.result()
sources_proxies["geonode"] = geonode_result[:int(self.max_proxies/2)]
except Exception as e:
print(f"[bold red]Error fetching from geonode:[/bold red] {str(e)[:100]}")
sources_proxies["geonode"] = []
merged_proxies = []
if "proxyscrape" in sources_proxies:
merged_proxies.extend(sources_proxies["proxyscrape"])
if "geonode" in sources_proxies:
merged_proxies.extend(sources_proxies["geonode"])
proxy_list = merged_proxies[:self.max_proxies]
return proxy_list
def _test_single_request(self, proxy_info: tuple) -> tuple:
proxy, source = proxy_info
try:
start = time.time()
print(f"[yellow]Testing proxy for URL: {self.url}...")
with httpx.Client(proxy=proxy, timeout=self.timeout_threshold) as client:
response = client.get(self.url, headers=get_headers())
if response.status_code == 200:
@@ -161,72 +56,17 @@ class ProxyFinder:
return (proxy, success2 and time2 <= self.timeout_threshold, avg_time, text1, source)
def _handle_interrupt(self, sig, frame):
print("\n[bold yellow]Received keyboard interrupt. Terminating...[/bold yellow]")
print("\n[red]Received keyboard interrupt. Terminating...")
self.shutdown_flag = True
sys.exit(0)
def find_fast_proxy(self) -> tuple:
saved_proxies, last_update = self.load_saved_proxies()
if saved_proxies:
print("[cyan]Testing saved proxy...[/cyan]")
for proxy_data in saved_proxies:
result = self.test_proxy((proxy_data['proxy'], 'cached'))
if result[1]:
return proxy_data['proxy'], result[3], result[2]
else:
print(f"[red]Saved proxy {proxy_data['proxy']} failed - response time: {result[2]:.2f}s[/red]")
proxies = self.fetch_proxies_from_sources()
if not proxies:
print("[bold red]No proxies fetched to test.[/bold red]")
return (None, None, None)
found_proxy = None
response_text = None
source = None
failed_count = 0
success_count = 0
#print(f"[cyan]Testing {len(proxies)} proxies...[/cyan]")
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
futures = {executor.submit(self.test_proxy, p): p for p in proxies}
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
TextColumn("[cyan]{task.fields[success]}[/cyan]/[red]{task.fields[failed]}[/red]"),
TimeRemainingColumn(),
) as progress:
task = progress.add_task(
"[cyan]Testing Proxies",
total=len(futures),
success=success_count,
failed=failed_count
)
for future in as_completed(futures):
if self.shutdown_flag:
break
try:
proxy, success, elapsed, response, proxy_source = future.result()
if success:
success_count += 1
print(f"[bold green]Found valid proxy:[/bold green] {proxy} ({elapsed:.2f}s)")
found_proxy = proxy
response_text = response
self.save_working_proxy(proxy, elapsed)
self.shutdown_flag = True
break
else:
failed_count += 1
except Exception:
failed_count += 1
progress.update(task, advance=1, success=success_count, failed=failed_count)
if not found_proxy:
print("[bold red]No working proxies found[/bold red]")
return (found_proxy, response_text, source)
def find_fast_proxy(self) -> str:
try:
proxy_config = config_manager.get("REQUESTS", "proxy")
if proxy_config and isinstance(proxy_config, dict) and 'http' in proxy_config:
print("[cyan]Using configured proxy from config.json...[/cyan]")
return proxy_config['http']
except Exception as e:
print(f"[red]Error getting configured proxy: {str(e)}[/red]")
return None
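
ProxyFinder thus drops the Geonode/ProxyScrape scraping, the JSON cache, and the threaded progress bar: find_fast_proxy() now only returns the proxy configured under REQUESTS.proxy in config.json. The retained test helper still verifies a candidate by timing a real request through it; a simplified sketch of that check (return shape reduced from the original tuple):

import time
import httpx

def test_proxy(url, proxy, timeout_threshold=7.0):
    # A proxy passes only if a real request through it returns 200 within the threshold
    start = time.time()
    try:
        with httpx.Client(proxy=proxy, timeout=timeout_threshold) as client:
            ok = client.get(url).status_code == 200
    except httpx.HTTPError:
        ok = False
    elapsed = time.time() - start
    return ok and elapsed <= timeout_threshold, elapsed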

View File

@@ -0,0 +1,62 @@
{
"DEFAULT": {
"debug": false,
"show_message": true,
"clean_console": true,
"show_trending": true,
"use_api": true,
"not_close": false,
"telegram_bot": true,
"download_site_data": true,
"validate_github_config": true
},
"OUT_FOLDER": {
"root_path": "/mnt/data/media/",
"movie_folder_name": "films",
"serie_folder_name": "serie_tv",
"anime_folder_name": "Anime",
"map_episode_name": "E%(episode)_%(episode_name)",
"add_siteName": false
},
"QBIT_CONFIG": {
"host": "192.168.1.51",
"port": "6666",
"user": "admin",
"pass": "adminadmin"
},
"M3U8_DOWNLOAD": {
"tqdm_delay": 0.01,
"default_video_workser": 12,
"default_audio_workser": 12,
"segment_timeout": 8,
"download_audio": true,
"merge_audio": true,
"specific_list_audio": [
"ita"
],
"download_subtitle": true,
"merge_subs": true,
"specific_list_subtitles": [
"ita",
"eng"
],
"cleanup_tmp_folder": true
},
"M3U8_CONVERSION": {
"use_codec": false,
"use_vcodec": true,
"use_acodec": true,
"use_bitrate": true,
"use_gpu": false,
"default_preset": "ultrafast"
},
"M3U8_PARSER": {
"force_resolution": "Best",
"get_only_link": false
},
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8
}
}

View File

@@ -575,6 +575,10 @@ class TelegramBot:
cleaned_output = cleaned_output.replace(
"\n\n", "\n"
) # Remove repeated newlines
# Initialize the variables
cleaned_output_0 = None # or ""
cleaned_output_1 = None # or ""
# cleaned_output holds a string; grab whatever sits between ## and ##
download_section = re.search(r"##(.*?)##", cleaned_output, re.DOTALL)
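
Initializing the two variables up front matters because the ##...## block is optional: re.search() returns None when the markers are missing, and only some branches assign the variables afterwards. The extraction in isolation:

import re

cleaned_output = "Header ## Episode 1\nEpisode 2 ## Footer"
download_section = re.search(r"##(.*?)##", cleaned_output, re.DOTALL)
# re.DOTALL lets the lazy group span newlines between the two markers
if download_section:
    episodes = download_section.group(1).strip()  # "Episode 1\nEpisode 2"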

View File

@@ -4,6 +4,7 @@ import os
import sys
import time
import asyncio
import importlib.metadata
# External library
import httpx
@@ -11,7 +12,7 @@ from rich.console import Console
# Internal utilities
from .version import __version__, __author__, __title__
from .version import __version__ as source_code_version, __author__, __title__
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
@@ -75,7 +76,11 @@ def update():
percentual_stars = 0
# Get the current version (installed version)
current_version = __version__
try:
current_version = importlib.metadata.version(__title__)
except importlib.metadata.PackageNotFoundError:
#console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]")
current_version = source_code_version
# Get commit details
latest_commit = response_commits[0] if response_commits else None
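
Asking importlib.metadata for the installed distribution's version, and only falling back to the in-repo version string, keeps the update check accurate when the pip-installed package and the source tree disagree. The pattern in isolation (resolve_current_version is an illustrative name):

import importlib.metadata

def resolve_current_version(package_name, source_version):
    # Prefer the version pip actually installed; fall back to the source tree's string
    try:
        return importlib.metadata.version(package_name)
    except importlib.metadata.PackageNotFoundError:
        return source_version

current_version = resolve_current_version('StreamingCommunity', '3.0.9')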

View File

@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
__version__ = '3.0.4'
__version__ = '3.0.9'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
__copyright__ = 'Copyright 2024'
__copyright__ = 'Copyright 2025'

View File

@@ -36,8 +36,10 @@ class ConfigManager:
base_path = os.path.dirname(sys.executable)
else:
# Use the current directory where the script is executed
base_path = os.getcwd()
# Get the actual path of the module file
current_file_path = os.path.abspath(__file__)
base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
# Initialize file paths
self.file_path = os.path.join(base_path, file_name)
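
Deriving base_path from the module's own location instead of os.getcwd() makes config.json resolve to the project root no matter where the user launches the script from. Assuming the module sits three levels below that root (e.g. <root>/StreamingCommunity/Util/config_json.py), the walk-up is:

import os

current_file_path = os.path.abspath(__file__)
# Three dirname() hops: config_json.py -> Util -> StreamingCommunity -> project root
base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
config_path = os.path.join(base_path, "config.json")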
@@ -166,7 +168,6 @@ class ConfigManager:
raise Exception(f"Error downloading reference configuration. Code: {response.status_code}")
reference_config = response.json()
console.print(f"[bold cyan]Reference configuration downloaded:[/bold cyan] [green]{len(reference_config)} keys available[/green]")
# Compare and update missing keys
merged_config = self._deep_merge_configs(self.config, reference_config)
@@ -267,34 +268,32 @@ class ConfigManager:
self._load_site_data_from_file()
def _load_site_data_from_api(self) -> None:
"""Load site data from API."""
"""Load site data from GitHub."""
domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
headers = {
"apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
"Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
"Content-Type": "application/json",
"User-Agent": get_userAgent()
"User-Agent": get_userAgent()
}
try:
console.print("[bold cyan]Retrieving site data from API...[/bold cyan]")
response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers)
console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
response = requests.get(domains_github_url, timeout=8, headers=headers)
if response.ok:
data = response.json()
if data and len(data) > 0:
self.configSite = data[0]['data']
site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
console.print(f"[bold green]Site data retrieved:[/bold green] {site_count} streaming services available")
else:
console.print("[bold yellow]API returned an empty data set[/bold yellow]")
self.configSite = response.json()
site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")
else:
console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
self._handle_site_data_fallback()
except json.JSONDecodeError as e:
console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}")
self._handle_site_data_fallback()
except Exception as e:
console.print(f"[bold red]API connection error:[/bold red] {str(e)}")
console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
self._handle_site_data_fallback()
def _load_site_data_from_file(self) -> None:
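
Domain data now comes from a version-controlled JSON file on GitHub rather than the Supabase REST endpoint, so the response body is the site dict itself instead of a one-element list wrapping a 'data' field. A minimal fetch mirroring the code above (raw.githubusercontent.com needs no auth headers for public repos):

import requests

domains_github_url = (
    "https://raw.githubusercontent.com/Arrowar/StreamingCommunity"
    "/refs/heads/main/.github/.domain/domains.json"
)
response = requests.get(domains_github_url, timeout=8)
response.raise_for_status()
config_site = response.json()  # {site_name: {...}, ...}
print(f"{len(config_site)} streaming services found")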
@@ -559,7 +558,6 @@ class ConfigManager:
return section in config_source
# Helper function to check the platform
def get_use_large_bar():
"""
Determine if the large bar feature should be enabled.

View File

@@ -12,7 +12,7 @@ import inspect
import subprocess
import contextlib
import importlib.metadata
import socket
# External library
from unidecode import unidecode
@@ -283,43 +283,61 @@ class InternManager():
else:
return f"{bytes / (1024 * 1024):.2f} MB/s"
def check_dns_provider(self):
# def check_dns_provider(self):
# """
# Check if the system's current DNS server matches any known DNS providers.
# Returns:
# bool: True if the current DNS server matches a known provider,
# False if no match is found or in case of errors
# """
# dns_providers = {
# "Cloudflare": ["1.1.1.1", "1.0.0.1"],
# "Google": ["8.8.8.8", "8.8.4.4"],
# "OpenDNS": ["208.67.222.222", "208.67.220.220"],
# "Quad9": ["9.9.9.9", "149.112.112.112"],
# "AdGuard": ["94.140.14.14", "94.140.15.15"],
# "Comodo": ["8.26.56.26", "8.20.247.20"],
# "Level3": ["209.244.0.3", "209.244.0.4"],
# "Norton": ["199.85.126.10", "199.85.127.10"],
# "CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
# "Yandex": ["77.88.8.8", "77.88.8.1"]
# }
# try:
# resolver = dns.resolver.Resolver()
# nameservers = resolver.nameservers
# if not nameservers:
# return False
# for server in nameservers:
# for provider, ips in dns_providers.items():
# if server in ips:
# return True
# return False
# except Exception:
# return False
def check_dns_resolve(self):
"""
Check if the system's current DNS server matches any known DNS providers.
Check if the system's current DNS server can resolve a domain name.
Works on both Windows and Unix-like systems.
Returns:
bool: True if the current DNS server matches a known provider,
False if no match is found or in case of errors
bool: True if the current DNS server can resolve a domain name,
False if it cannot resolve or an error occurs
"""
dns_providers = {
"Cloudflare": ["1.1.1.1", "1.0.0.1"],
"Google": ["8.8.8.8", "8.8.4.4"],
"OpenDNS": ["208.67.222.222", "208.67.220.220"],
"Quad9": ["9.9.9.9", "149.112.112.112"],
"AdGuard": ["94.140.14.14", "94.140.15.15"],
"Comodo": ["8.26.56.26", "8.20.247.20"],
"Level3": ["209.244.0.3", "209.244.0.4"],
"Norton": ["199.85.126.10", "199.85.127.10"],
"CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
"Yandex": ["77.88.8.8", "77.88.8.1"]
}
test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]
try:
resolver = dns.resolver.Resolver()
nameservers = resolver.nameservers
if not nameservers:
return False
for server in nameservers:
for provider, ips in dns_providers.items():
if server in ips:
return True
for domain in test_domains:
# socket.gethostbyname() works consistently across all platforms
socket.gethostbyname(domain)
return True
except (socket.gaierror, socket.error):
return False
except Exception:
return False
class OsSummary:
def __init__(self):
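
The replaced provider check compared resolver.nameservers against a list of well-known public DNS IPs, which can report failure behind a home router that fronts a public resolver with a 192.168.x.x address; the new check simply asks the system resolver to resolve real domains. Reduced to its core (dns_can_resolve is an illustrative name):

import socket

def dns_can_resolve(test_domains=("github.com", "google.com", "microsoft.com", "amazon.com")):
    # All test domains must resolve; gethostbyname behaves the same on Windows and Unix
    try:
        for domain in test_domains:
            socket.gethostbyname(domain)
        return True
    except (socket.gaierror, socket.error):
        return False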

View File

@@ -61,7 +61,8 @@ def load_search_functions():
priority = getattr(mod, '_priority', 0)
if priority == 0:
modules.append((module_name, indice, use_for))
if not getattr(mod, '_deprecate'):
modules.append((module_name, indice, use_for))
except Exception as e:
console.print(f"[red]Failed to import module {module_name}: {str(e)}")
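
The loader now honors a per-module _deprecate flag before registering a site. Note that the code above calls getattr(mod, '_deprecate') without a default, so a module missing the attribute raises and falls into the surrounding except; a defensive sketch with defaults (collect_site_modules is an illustrative name):

import importlib

def collect_site_modules(module_names):
    modules = []
    for module_name in module_names:
        mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
        # Skip providers that declare themselves deprecated
        if not getattr(mod, '_deprecate', False):
            modules.append((module_name,
                            getattr(mod, 'indice', 0),
                            getattr(mod, '_useFor', 'other')))
    return modules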

View File

@@ -30,7 +30,7 @@ from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance, Teleg
# Config
SHOW_TRENDING = config_manager.get_bool('DEFAULT', 'show_trending')
CLOSE_CONSOLE = config_manager.get_bool('DEFAULT', 'not_close')
NOT_CLOSE_CONSOLE = config_manager.get_bool('DEFAULT', 'not_close')
TELEGRAM_BOT = config_manager.get_bool('DEFAULT', 'telegram_bot')
@@ -61,7 +61,7 @@ def load_search_functions():
loaded_functions = {}
# Sites to exclude when TELEGRAM_BOT is active
excluded_sites = {"cb01new", "ddlstreamitaly", "guardaserie", "ilcorsaronero", "mostraguarda"} if TELEGRAM_BOT else set()
excluded_sites = {"cb01new", "guardaserie", "ilcorsaronero", "mostraguarda"} if TELEGRAM_BOT else set()
# Find api home directory
if getattr(sys, 'frozen', False): # PyInstaller mode
@@ -89,9 +89,11 @@ def load_search_functions():
mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
# Get 'indice' from the module
indice = getattr(mod, 'indice', 0)
use_for = getattr(mod, '_useFor', 'other')
modules.append((module_name, indice, use_for))
indice = getattr(mod, 'indice')
use_for = getattr(mod, '_useFor')
if not getattr(mod, '_deprecate'):
modules.append((module_name, indice, use_for))
except Exception as e:
console.print(f"[red]Failed to import module {module_name}: {str(e)}")
@@ -191,6 +193,13 @@ def force_exit():
def main(script_id = 0):
color_map = {
"anime": "red",
"film_&_serie": "yellow",
"serie": "blue",
"torrent": "white"
}
if TELEGRAM_BOT:
bot = get_bot_instance()
bot.send_message(f"Avviato script {script_id}", None)
@@ -201,14 +210,28 @@ def main(script_id = 0):
log_not = Logger()
initialize()
if not internet_manager.check_dns_provider():
# if not internet_manager.check_dns_provider():
# print()
# console.print("[red]❌ ERROR: DNS configuration is required!")
# console.print("[red]The program cannot function correctly without proper DNS settings.")
# console.print("[yellow]Please configure one of these DNS servers:")
# console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
# console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
# console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
# time.sleep(2)
# msg.ask("[yellow]Press Enter to continue ...")
if not internet_manager.check_dns_resolve():
print()
console.print("[red]❌ ERROR: DNS configuration is required!")
console.print("[red]The program cannot function correctly without proper DNS settings.")
console.print("[yellow]Please configure one of these DNS servers:")
console.print("[blue]• Cloudflare (1.1.1.1)")
console.print("[blue]• Quad9 (9.9.9.9)")
console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
input("[yellow]Press Enter to exit...")
os._exit(0)
# Load search functions
search_functions = load_search_functions()
@@ -251,30 +274,6 @@ def main(script_id = 0):
)
# Add arguments for search functions
color_map = {
"anime": "red",
"film_serie": "yellow",
"film": "blue",
"serie": "green",
"other": "white"
}
# Add dynamic arguments based on loaded search modules
used_short_options = set()
for alias, (_, use_for) in search_functions.items():
short_option = alias[:3].upper()
original_short_option = short_option
count = 1
while short_option in used_short_options:
short_option = f"{original_short_option}{count}"
count += 1
used_short_options.add(short_option)
long_option = alias
parser.add_argument(f'-{short_option}', f'--{long_option}', action='store_true', help=f'Search for {alias.split("_")[0]} on streaming platforms.')
parser.add_argument('-s', '--search', default=None, help='Search terms')
# Parse command-line arguments
@@ -309,54 +308,45 @@ def main(script_id = 0):
global_search(search_terms)
return
# Map command-line arguments to functions
arg_to_function = {alias: func for alias, (func, _) in search_functions.items()}
# Create mappings using module indice
input_to_function = {}
choice_labels = {}
for alias, (func, use_for) in search_functions.items():
module_name = alias.split("_")[0]
try:
mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
site_index = str(getattr(mod, 'indice'))
input_to_function[site_index] = func
choice_labels[site_index] = (module_name.capitalize(), use_for.lower())
except Exception as e:
console.print(f"[red]Error mapping module {module_name}: {str(e)}")
# Check which argument is provided and run the corresponding function
for arg, func in arg_to_function.items():
if getattr(args, arg):
run_function(func, search_terms=search_terms)
return
# Mapping user input to functions
input_to_function = {str(i): func for i, (alias, (func, _)) in enumerate(search_functions.items())}
# Create dynamic prompt message and choices
choice_labels = {str(i): (alias.split("_")[0].capitalize(), use_for) for i, (alias, (_, use_for)) in enumerate(search_functions.items())}
# Add global search option to the menu
#global_search_key = str(len(choice_labels))
#choice_labels[global_search_key] = ("Global Search", "all")
#input_to_function[global_search_key] = global_search
# Display the category legend in a single line
# Display the category legend
legend_text = " | ".join([f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items()])
console.print(f"\n[bold green]Category Legend:[/bold green] {legend_text}")
# Construct the prompt message with color-coded site names
# Construct prompt with proper color mapping
prompt_message = "[green]Insert category [white](" + ", ".join(
[f"{key}: [{color_map.get(label[1], 'white')}]{label[0]}[/{color_map.get(label[1], 'white')}]" for key, label in choice_labels.items()]
[f"[{color_map.get(label[1], 'white')}]{key}: {label[0]}[/{color_map.get(label[1], 'white')}]"
for key, label in choice_labels.items()]
) + "[white])"
if TELEGRAM_BOT:
# Display the category legend in a single line
category_legend_str = "Categorie: \n" + " | ".join([
f"{category.capitalize()}" for category in color_map.keys()
])
# Build the message without emoji
prompt_message = "Inserisci il sito:\n" + "\n".join(
[f"{key}: {label[0]}" for key, label in choice_labels.items()]
)
console.print(f"\n{prompt_message}")
# Ask the user for the choice via the Telegram bot
category = bot.ask(
"select_provider",
f"{category_legend_str}\n\n{prompt_message}",
None # Pass the list of keys as the choices
None
)
else:
@@ -364,13 +354,6 @@ def main(script_id = 0):
# Run the corresponding function based on user input
if category in input_to_function:
"""if category == global_search_key:
# Run global search
run_function(input_to_function[category], search_terms=search_terms)
else:"""
# Run normal site-specific search
run_function(input_to_function[category], search_terms=search_terms)
else:
@@ -379,10 +362,11 @@ def main(script_id = 0):
console.print("[red]Invalid category.")
if CLOSE_CONSOLE:
restart_script() # Restart the script instead of exiting
if NOT_CLOSE_CONSOLE:
restart_script()
else:
force_exit() # Use this function to always close
force_exit()
if TELEGRAM_BOT:
bot.send_message(f"Chiusura in corso", None)
@@ -390,4 +374,4 @@ def main(script_id = 0):
# Delete script_id
script_id = TelegramSession.get_session()
if script_id != "unknown":
TelegramSession.deleteScriptId(script_id)
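
Menu numbers are now each module's own indice value instead of the enumeration order, and the legend and prompt colors both derive from the same color_map. A reduced sketch of the rich-markup prompt, with one illustrative choice_labels entry:

from rich.console import Console

console = Console()
color_map = {"anime": "red", "film_&_serie": "yellow", "serie": "blue", "torrent": "white"}
choice_labels = {"7": ("Streamingwatch", "film_&_serie")}  # illustrative: indice -> (name, category)

# Legend line: one color per category
legend = " | ".join(f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items())
console.print(f"\n[bold green]Category Legend:[/bold green] {legend}")

# Prompt line: each site number rendered in its category's color
prompt_message = "[green]Insert category [white](" + ", ".join(
    f"[{color_map.get(label[1], 'white')}]{key}: {label[0]}[/{color_map.get(label[1], 'white')}]"
    for key, label in choice_labels.items()
) + "[white])"
console.print(prompt_message)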

View File

@@ -1,329 +0,0 @@
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
:root {
--primary-color: #8c52ff;
--secondary-color: #6930c3;
--accent-color: #00e5ff;
--background-color: #121212;
--card-background: #1e1e1e;
--text-color: #f8f9fa;
--shadow-color: rgba(0, 0, 0, 0.25);
--card-hover: #2a2a2a;
--border-color: #333333;
--header-bg: rgba(18, 18, 18, 0.95);
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
transition: all 0.2s ease;
}
body {
font-family: 'Inter', 'Segoe UI', sans-serif;
background-color: var(--background-color);
color: var(--text-color);
line-height: 1.6;
min-height: 100vh;
display: flex;
flex-direction: column;
}
header {
background-color: var(--header-bg);
backdrop-filter: blur(10px);
position: fixed;
width: 100%;
padding: 15px 0;
z-index: 1000;
box-shadow: 0 2px 12px var(--shadow-color);
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex: 1;
}
.site-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 24px;
padding: 2rem 0;
}
.site-item {
min-height: 280px;
background-color: var(--card-background);
border-radius: 16px;
padding: 30px;
box-shadow: 0 6px 20px var(--shadow-color);
transition: transform 0.3s ease, box-shadow 0.3s ease;
display: flex;
flex-direction: column;
align-items: center;
border: 1px solid var(--border-color);
position: relative;
overflow: hidden;
}
.site-item::before {
content: '';
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 4px;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
}
.site-item:hover {
transform: translateY(-5px);
box-shadow: 0 12px 30px var(--shadow-color);
}
.site-item img {
width: 80px;
height: 80px;
margin-bottom: 1.5rem;
border-radius: 16px;
object-fit: cover;
border: 2px solid var(--border-color);
}
.site-content {
text-align: center;
width: 100%;
}
.site-item h3 {
font-size: 1.4rem;
font-weight: 600;
margin-bottom: 0.5rem;
color: var(--primary-color);
}
.domain {
color: var(--text-color);
opacity: 0.8;
font-size: 0.9rem;
margin-bottom: 1.5rem;
word-break: break-all;
}
.site-item a {
margin-top: 1rem;
background: linear-gradient(135deg, var(--primary-color), var(--secondary-color));
color: white;
text-decoration: none;
font-weight: 500;
padding: 12px 28px;
border-radius: 8px;
width: fit-content;
margin: 0 auto;
display: flex;
align-items: center;
gap: 8px;
}
.site-item a:hover {
opacity: 0.9;
transform: translateY(-2px);
}
footer {
background: var(--card-background);
border-top: 1px solid var(--border-color);
margin-top: auto;
padding: 40px 20px;
position: relative;
}
.footer-content {
max-width: 1200px;
margin: 0 auto;
display: grid;
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
gap: 30px;
position: relative;
padding: 10px 0;
}
.footer-section {
padding: 20px;
border-radius: 12px;
transition: transform 0.3s ease, background-color 0.3s ease;
background-color: var(--card-background);
border: 1px solid var(--border-color);
}
.footer-section:hover {
transform: translateY(-5px);
background-color: var(--card-hover);
}
.footer-title {
color: var(--accent-color);
font-size: 1.3rem;
margin-bottom: 1.5rem;
padding-bottom: 0.5rem;
position: relative;
letter-spacing: 0.5px;
}
.footer-title::after {
content: '';
position: absolute;
bottom: 0;
left: 0;
width: 60px;
height: 3px;
border-radius: 2px;
background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
}
.footer-links {
list-style: none;
}
.footer-links li {
margin-bottom: 0.8rem;
}
.footer-links a {
color: var(--text-color);
text-decoration: none;
display: flex;
align-items: center;
gap: 8px;
opacity: 0.8;
transition: all 0.3s ease;
padding: 8px 12px;
border-radius: 8px;
background-color: transparent;
}
.footer-links a:hover {
opacity: 1;
color: var(--accent-color);
transform: translateX(8px);
background-color: rgba(140, 82, 255, 0.1);
}
.footer-links i {
width: 20px;
text-align: center;
font-size: 1.2rem;
color: var(--primary-color);
transition: transform 0.3s ease;
}
.footer-links a:hover i {
transform: scale(1.2);
}
.github-stats {
display: flex;
gap: 10px;
margin-top: 10px;
font-size: 0.8rem;
}
.github-badge {
background-color: var(--background-color);
padding: 4px 8px;
border-radius: 4px;
display: flex;
align-items: center;
gap: 4px;
}
.github-badge i {
color: var(--accent-color);
}
.footer-description {
margin-top: 15px;
font-size: 0.9rem;
color: var(--text-color);
opacity: 0.8;
line-height: 1.5;
}
.update-info {
text-align: center;
margin-top: 30px;
padding-top: 30px;
border-top: 1px solid var(--border-color);
}
.update-note {
color: var(--accent-color);
font-size: 0.9rem;
opacity: 0.9;
}
@media (max-width: 768px) {
.footer-content {
grid-template-columns: 1fr;
text-align: center;
}
.footer-title::after {
left: 50%;
transform: translateX(-50%);
}
.footer-links a {
justify-content: center;
}
.footer-links a:hover {
transform: translateY(-5px);
}
.footer-section {
margin-bottom: 20px;
}
}
.loader {
border: 3px solid var(--border-color);
border-top: 3px solid var(--primary-color);
border-right: 3px solid var(--accent-color);
border-radius: 50%;
width: 50px;
height: 50px;
animation: spin 1s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
@media (max-width: 768px) {
.site-item {
padding: 25px;
}
.site-item img {
width: 70px;
height: 70px;
}
}
.old-domain, .time-change {
color: var(--text-color);
opacity: 0.7;
font-size: 0.85rem;
margin-bottom: 0.5rem;
word-break: break-all;
}
.label {
color: var(--accent-color);
font-weight: 500;
}

View File

@@ -1,88 +0,0 @@
const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co';
const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE';
async function loadSiteData() {
try {
const siteList = document.getElementById('site-list');
const headers = {
'apikey': supabaseKey,
'Authorization': `Bearer ${supabaseKey}`,
'Content-Type': 'application/json'
};
const response = await fetch(`${supabaseUrl}/rest/v1/public`, {
method: 'GET',
headers: headers
});
if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
const data = await response.json();
siteList.innerHTML = '';
if (data && data.length > 0) {
const configSite = data[0].data;
for (const siteName in configSite) {
const site = configSite[siteName];
const siteItem = document.createElement('div');
siteItem.className = 'site-item';
const siteIcon = document.createElement('img');
siteIcon.src = `https://t2.gstatic.com/faviconV2?client=SOCIAL&type=FAVICON&fallback_opts=TYPE,SIZE,URL&url=${site.full_url}&size=128`;
siteIcon.alt = `${siteName} icon`;
siteIcon.onerror = function() {
this.src = 'data:image/svg+xml;utf8,<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 24 24" fill="none" stroke="%238c52ff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z"></path></svg>';
};
const siteContent = document.createElement('div');
siteContent.className = 'site-content';
const siteTitle = document.createElement('h3');
siteTitle.textContent = siteName;
if (site.old_domain) {
const oldDomain = document.createElement('p');
oldDomain.className = 'old-domain';
oldDomain.innerHTML = `<span class="label">Previous domain:</span> ${site.old_domain.replace(/^https?:\/\//, '')}`;
siteContent.appendChild(oldDomain);
}
if (site.time_change) {
const timeChange = document.createElement('p');
timeChange.className = 'time-change';
const changeDate = new Date(site.time_change);
const dateString = isNaN(changeDate) ? site.time_change : changeDate.toLocaleDateString();
timeChange.innerHTML = `<span class="label">Updated:</span> ${dateString}`;
siteContent.appendChild(timeChange);
}
const siteLink = document.createElement('a');
siteLink.href = site.full_url;
siteLink.target = '_blank';
siteLink.innerHTML = 'Visit <i class="fas fa-external-link-alt"></i>';
siteLink.rel = 'noopener noreferrer';
siteContent.appendChild(siteTitle);
siteContent.appendChild(siteLink);
siteItem.appendChild(siteIcon);
siteItem.appendChild(siteContent);
siteList.appendChild(siteItem);
}
} else {
siteList.innerHTML = '<div class="no-sites">No sites available</div>';
}
} catch (error) {
console.error('Errore:', error);
siteList.innerHTML = `
<div class="error-message">
<p>Errore nel caricamento</p>
<button onclick="loadSiteData()" class="retry-button">Riprova</button>
</div>
`;
}
}
document.addEventListener('DOMContentLoaded', loadSiteData);

View File

@@ -24,11 +24,6 @@
"user": "admin",
"pass": "adminadmin"
},
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8
},
"M3U8_DOWNLOAD": {
"tqdm_delay": 0.01,
"default_video_workser": 12,
@@ -59,11 +54,10 @@
"force_resolution": "Best",
"get_only_link": false
},
"SITE_EXTRA": {
"ddlstreamitaly": {
"ips4_device_key": "",
"ips4_member_id": "",
"ips4_login_key": ""
}
"REQUESTS": {
"verify": false,
"timeout": 20,
"max_retry": 8,
"proxy": ""
}
}

View File

@@ -1,20 +1,19 @@
FROM python:3.11-slim
COPY . /app
WORKDIR /app
ENV TEMP /tmp
RUN mkdir -p $TEMP
RUN apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y --no-install-recommends \
ffmpeg \
build-essential \
libssl-dev \
libffi-dev \
python3-dev \
libxml2-dev \
libxslt1-dev
libxslt1-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD ["python", "test_run.py"]

View File

@@ -6,6 +6,7 @@ m3u8
certifi
psutil
unidecode
curl_cffi
dnspython
jsbeautifier
pathvalidate
@@ -13,3 +14,4 @@ pycryptodomex
ua-generator
qbittorrent-api
pyTelegramBotAPI
beautifulsoup4

View File

@@ -1,4 +1,5 @@
import os
import re
from setuptools import setup, find_packages
def read_readme():
@@ -8,9 +9,21 @@ def read_readme():
with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f:
required_packages = f.read().splitlines()
def get_version():
try:
import pkg_resources
return pkg_resources.get_distribution('StreamingCommunity').version
except:
version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py")
with open(version_file_path, "r", encoding="utf-8") as f:
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string in StreamingCommunity/Upload/version.py.")
setup(
name="StreamingCommunity",
version="3.0.4",
version=get_version(),
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Lovi-0",
@@ -29,4 +42,4 @@ setup(
"Bug Reports": "https://github.com/Lovi-0/StreamingCommunity/issues",
"Source": "https://github.com/Lovi-0/StreamingCommunity",
}
)
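
The version regex in get_version() anchors on ^ with re.M so the assignment line matches anywhere inside version.py. A quick self-check of the pattern:

import re

sample = "__title__ = 'StreamingCommunity'\n__version__ = '3.0.9'\n"
match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", sample, re.M)
# re.M makes ^ match at each line start, not only at the start of the whole string
assert match is not None and match.group(1) == "3.0.9"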