Merge branch 'main' into feat/gui

commit b5a22aea47
Francesco Grazioso, 2025-06-12 16:35:10 +02:00, committed by GitHub
22 changed files with 704 additions and 128 deletions

.github/.domain/domain_update.py (new vendored file)

@@ -0,0 +1,360 @@
# 20.04.2024
import os
import json
from datetime import datetime
from urllib.parse import urlparse, unquote
# External libraries
import httpx
import tldextract
import ua_generator
import dns.resolver
# Variables
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
JSON_FILE_PATH = os.path.join(SCRIPT_DIR, "domains.json")
ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge'))
def get_headers():
    return ua.headers.get()


def get_tld(url_str):
    try:
        parsed = urlparse(unquote(url_str))
        domain = parsed.netloc.lower().lstrip('www.')
        parts = domain.split('.')
        return parts[-1] if len(parts) >= 2 else None

    except Exception:
        return None


def get_base_domain(url_str):
    try:
        parsed = urlparse(url_str)
        domain = parsed.netloc.lower().lstrip('www.')
        parts = domain.split('.')
        return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0]

    except Exception:
        return None


def get_base_url(url_str):
    try:
        parsed = urlparse(url_str)
        return f"{parsed.scheme}://{parsed.netloc}"

    except Exception:
        return None


def log(msg, level='INFO'):
    levels = {
        'INFO': '[ ]',
        'SUCCESS': '[+]',
        'WARNING': '[!]',
        'ERROR': '[-]'
    }
    entry = f"{levels.get(level, '[?]')} {msg}"
    print(entry)
def load_json_data(file_path):
    if not os.path.exists(file_path):
        log(f"Error: The file {file_path} was not found.", "ERROR")
        return None

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    except Exception as e:
        log(f"Error reading the file {file_path}: {e}", "ERROR")
        return None


def save_json_data(file_path, data):
    try:
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
        log(f"Data successfully saved to {file_path}", "SUCCESS")

    except Exception as e:
        log(f"Error saving the file {file_path}: {e}", "ERROR")


def parse_url(url):
    if not url.startswith(('http://', 'https://')):
        url = 'https://' + url

    try:
        extracted = tldextract.extract(url)
        parsed = urlparse(url)

        clean_url = f"{parsed.scheme}://{parsed.netloc}/"
        full_domain = f"{extracted.domain}.{extracted.suffix}" if extracted.domain else extracted.suffix
        domain_tld = extracted.suffix

        result = {
            'url': clean_url,
            'full_domain': full_domain,
            'domain': domain_tld,
            'suffix': extracted.suffix,
            'subdomain': extracted.subdomain or None
        }
        return result

    except Exception as e:
        log(f"Error parsing URL: {e}", "ERROR")
        return None


def check_dns_resolution(domain):
    try:
        resolver = dns.resolver.Resolver()
        resolver.timeout = 2
        resolver.lifetime = 2

        try:
            answers = resolver.resolve(domain, 'A')
            return str(answers[0])
        except:
            try:
                answers = resolver.resolve(domain, 'AAAA')
                return str(answers[0])
            except:
                pass
        return None

    except:
        return None
def find_new_domain(input_url, output_file=None, verbose=True, json_output=False):
    log_buffer = []
    original_info = parse_url(input_url)

    if not original_info:
        log(f"Could not parse original URL: {input_url}", "ERROR")
        if json_output:
            return {'full_url': input_url, 'domain': None}
        return None

    log(f"Starting analysis for: {original_info['full_domain']}")

    orig_ip = check_dns_resolution(original_info['full_domain'])
    if orig_ip:
        log(f"Original domain resolves to: {orig_ip}", "SUCCESS")
    else:
        log("Original domain does not resolve to an IP address", "WARNING")

    headers = get_headers()
    new_domains = []
    redirects = []
    final_url = None
    final_domain_info = None
    url_to_test_in_loop = None

    for protocol in ['https://', 'http://']:
        try:
            url_to_test_in_loop = f"{protocol}{original_info['full_domain']}"
            log(f"Testing connectivity to {url_to_test_in_loop}")

            redirect_chain = []
            current_url = url_to_test_in_loop
            max_redirects = 10
            redirect_count = 0

            while redirect_count < max_redirects:
                with httpx.Client(verify=False, follow_redirects=False, timeout=5) as client:
                    response = client.get(current_url, headers=headers)

                redirect_info = {'url': current_url, 'status_code': response.status_code}
                redirect_chain.append(redirect_info)
                log(f"Request to {current_url} - Status: {response.status_code}")

                if response.status_code in (301, 302, 303, 307, 308):
                    if 'location' in response.headers:
                        next_url = response.headers['location']
                        if next_url.startswith('/'):
                            parsed_current = urlparse(current_url)
                            next_url = f"{parsed_current.scheme}://{parsed_current.netloc}{next_url}"

                        log(f"Redirect found: {next_url} (Status: {response.status_code})")
                        current_url = next_url
                        redirect_count += 1

                        redirect_domain_info_val = parse_url(next_url)
                        if redirect_domain_info_val and redirect_domain_info_val['full_domain'] != original_info['full_domain']:
                            new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'})
                    else:
                        log("Redirect status code but no Location header", "WARNING")
                        break
                else:
                    break

            if redirect_chain:
                final_url = redirect_chain[-1]['url']
                final_domain_info = parse_url(final_url)
                redirects.extend(redirect_chain)
                log(f"Final URL after redirects: {final_url}", "SUCCESS")

                if final_domain_info and final_domain_info['full_domain'] != original_info['full_domain']:
                    new_domains.append({'domain': final_domain_info['full_domain'], 'url': final_url, 'source': 'final_url'})

            final_status = redirect_chain[-1]['status_code'] if redirect_chain else None
            if final_status and final_status < 400 and final_status != 403:
                break

            if final_status == 403 and redirect_chain and len(redirect_chain) > 1:
                log(f"Got 403 Forbidden, but captured {len(redirect_chain)-1} redirects before that", "SUCCESS")
                break

        except httpx.RequestError as e:
            log(f"Error connecting to {protocol}{original_info['full_domain']}: {str(e)}", "ERROR")

    url_for_auto_redirect = input_url
    if url_to_test_in_loop:
        url_for_auto_redirect = url_to_test_in_loop
    elif original_info and original_info.get('url'):
        url_for_auto_redirect = original_info['url']

    if not redirects or not new_domains:
        log("Trying alternate method with automatic redirect following")

        try:
            with httpx.Client(verify=False, follow_redirects=True, timeout=5) as client:
                response_auto = client.get(url_for_auto_redirect, headers=headers)

            log(f"Connected with auto-redirects: Status {response_auto.status_code}")

            if response_auto.history:
                log(f"Found {len(response_auto.history)} redirects with auto-following", "SUCCESS")

                for r_hist in response_auto.history:
                    redirect_info_auto = {'url': str(r_hist.url), 'status_code': r_hist.status_code}
                    redirects.append(redirect_info_auto)
                    log(f"Auto-redirect: {r_hist.url} (Status: {r_hist.status_code})")

                final_url = str(response_auto.url)
                final_domain_info = parse_url(final_url)

                for redirect_hist_item in response_auto.history:
                    redirect_domain_val = parse_url(str(redirect_hist_item.url))
                    if redirect_domain_val and original_info and redirect_domain_val['full_domain'] != original_info['full_domain']:
                        new_domains.append({'domain': redirect_domain_val['full_domain'], 'url': str(redirect_hist_item.url), 'source': 'auto-redirect'})

                current_final_url_info = parse_url(str(response_auto.url))
                if current_final_url_info and original_info and current_final_url_info['full_domain'] != original_info['full_domain']:
                    is_already_added = any(d['domain'] == current_final_url_info['full_domain'] and d['source'] == 'auto-redirect' for d in new_domains)
                    if not is_already_added:
                        new_domains.append({'domain': current_final_url_info['full_domain'], 'url': str(response_auto.url), 'source': 'final_url_auto'})

                    final_url = str(response_auto.url)
                    final_domain_info = current_final_url_info
                    log(f"Final URL from auto-redirect: {final_url}", "SUCCESS")

        except httpx.RequestError as e:
            log(f"Error with auto-redirect attempt: {str(e)}", "ERROR")
        except NameError:
            log("Error: URL for auto-redirect attempt was not defined.", "ERROR")

    unique_domains = []
    seen_domains = set()
    for domain_info_item in new_domains:
        if domain_info_item['domain'] not in seen_domains:
            seen_domains.add(domain_info_item['domain'])
            unique_domains.append(domain_info_item)

    if not final_url:
        final_url = input_url
    if not final_domain_info:
        final_domain_info = original_info

    if final_domain_info:
        parsed_final_url_info = parse_url(final_url)
        if parsed_final_url_info:
            final_url = parsed_final_url_info['url']
            final_domain_info = parsed_final_url_info
        else:
            final_domain_info = original_info
            final_url = original_info['url'] if original_info else input_url

    results_original_domain = original_info['full_domain'] if original_info else None
    results_final_domain_tld = final_domain_info['domain'] if final_domain_info and 'domain' in final_domain_info else None

    results = {
        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        'original_url': input_url,
        'original_domain': results_original_domain,
        'original_ip': orig_ip,
        'new_domains': unique_domains,
        'redirects': redirects,
        'log': log_buffer
    }

    simplified_json_output = {'full_url': final_url, 'domain': results_final_domain_tld}

    if verbose:
        log(f"DEBUG - Simplified output: {simplified_json_output}", "INFO")

    if output_file:
        try:
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(results, f, indent=2, ensure_ascii=False)
            log(f"Results saved to {output_file}", "SUCCESS")
        except Exception as e:
            log(f"Error writing to output file: {str(e)}", "ERROR")

    if json_output:
        return simplified_json_output
    else:
        return results
def update_site_entry(site_name: str, all_domains_data: dict):
    site_config = all_domains_data.get(site_name, {})
    log(f"Processing site: {site_name}", "INFO")

    if not site_config.get('full_url'):
        log(f"Site {site_name} has no full_url in config. Skipping.", "WARNING")
        return False

    current_full_url = site_config.get('full_url')
    current_domain_tld = site_config.get('domain')

    found_domain_info = find_new_domain(current_full_url, verbose=False, json_output=True)

    if found_domain_info and found_domain_info.get('full_url') and found_domain_info.get('domain'):
        new_full_url = found_domain_info['full_url']
        new_domain_tld = found_domain_info['domain']

        if new_full_url != current_full_url or new_domain_tld != current_domain_tld:
            log(f"Update found for {site_name}: URL '{current_full_url}' -> '{new_full_url}', TLD '{current_domain_tld}' -> '{new_domain_tld}'", "SUCCESS")
            updated_entry = site_config.copy()
            updated_entry['full_url'] = new_full_url
            updated_entry['domain'] = new_domain_tld

            if new_domain_tld != current_domain_tld:
                updated_entry['old_domain'] = current_domain_tld if current_domain_tld else ""

            updated_entry['time_change'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            all_domains_data[site_name] = updated_entry
            return True
        else:
            log(f"No changes detected for {site_name}.", "INFO")
            return False
    else:
        log(f"Could not reliably find new domain info for {site_name} from URL: {current_full_url}. No search fallback.", "WARNING")
        return False


def main():
    log("Starting domain update script...")
    all_domains_data = load_json_data(JSON_FILE_PATH)

    if not all_domains_data:
        log("Cannot proceed: Domain data is missing or could not be loaded.", "ERROR")
        log("Script finished.")
        return

    any_updates_made = False
    for site_name_key in list(all_domains_data.keys()):
        if update_site_entry(site_name_key, all_domains_data):
            any_updates_made = True
        print("\n")

    if any_updates_made:
        save_json_data(JSON_FILE_PATH, all_domains_data)
        log("Update complete. Some entries were modified.", "SUCCESS")
    else:
        log("Update complete. No domains were modified.", "INFO")

    log("Script finished.")


if __name__ == "__main__":
    main()
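
The script above is driven end to end by main() (invoked from the update_domain.yml workflow added below), but find_new_domain() can also be exercised on its own. A minimal sketch, assuming the file is importable as domain_update; the URL and printed values are purely illustrative:

# Hypothetical usage sketch; the URL and the output shown are examples only.
from domain_update import find_new_domain

# json_output=True returns the compact dict that update_site_entry() consumes:
# {'full_url': <final URL after redirects>, 'domain': <TLD suffix>}
info = find_new_domain("https://example-streaming.site/", verbose=False, json_output=True)
print(info)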

.github/.domain/domains.json (new vendored file)

@@ -0,0 +1,62 @@
{
  "1337xx": {
    "domain": "to",
    "full_url": "https://www.1337xx.to/",
    "old_domain": "to",
    "time_change": "2025-03-19 12:20:19"
  },
  "cb01new": {
    "domain": "live",
    "full_url": "https://cb01net.live/",
    "old_domain": "digital",
    "time_change": "2025-06-11 07:20:30"
  },
  "animeunity": {
    "domain": "so",
    "full_url": "https://www.animeunity.so/",
    "old_domain": "so",
    "time_change": "2025-03-19 12:20:23"
  },
  "animeworld": {
    "domain": "ac",
    "full_url": "https://www.animeworld.ac/",
    "old_domain": "ac",
    "time_change": "2025-03-21 12:20:27"
  },
  "guardaserie": {
    "domain": "meme",
    "full_url": "https://guardaserie.meme/",
    "old_domain": "meme",
    "time_change": "2025-06-11 07:20:36"
  },
  "ddlstreamitaly": {
    "domain": "co",
    "full_url": "https://ddlstreamitaly.co/",
    "old_domain": "co",
    "time_change": "2025-03-19 12:20:26"
  },
  "streamingwatch": {
    "domain": "org",
    "full_url": "https://www.streamingwatch.org/",
    "old_domain": "org",
    "time_change": "2025-04-29 12:30:30"
  },
  "altadefinizione": {
    "domain": "spa",
    "full_url": "https://altadefinizione.spa/",
    "old_domain": "locker",
    "time_change": "2025-05-26 23:22:45"
  },
  "streamingcommunity": {
    "domain": "agency",
    "full_url": "https://streamingunity.agency/",
    "old_domain": "art",
    "time_change": "2025-06-10 10:23:11"
  },
  "altadefinizionegratis": {
    "domain": "club",
    "full_url": "https://altadefinizionegratis.club/",
    "old_domain": "cc",
    "time_change": "2025-06-11 07:20:42"
  }
}
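
Each entry above is what update_site_entry() rewrites when a site rotates its domain: full_url and domain take the new values, the previous TLD moves into old_domain, and time_change is refreshed. A hypothetical before/after, written as Python dicts purely for illustration (the site name and values are invented):

# Invented example of one entry before and after a domain rotation.
before = {"domain": "art", "full_url": "https://examplesite.art/", "old_domain": "art", "time_change": "2025-01-01 10:00:00"}
after = {"domain": "agency", "full_url": "https://examplesite.agency/", "old_domain": "art", "time_change": "2025-06-12 08:00:00"}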


@@ -113,43 +113,27 @@ async function checkSiteStatus(url, siteName) {
    }
}

-const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co';
-const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE';
+const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json';

async function loadSiteData() {
    try {
-       console.log('Starting to load site data...');
+       console.log('Starting to load site data from GitHub...');
        createStatusIndicator();
-       updateStatusIndicator('Loading...', 'Fetching site data from database...', 0);
+       updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0);
        const siteList = document.getElementById('site-list');
-       const headers = {
-           'accept': '*/*',
-           'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
-           'apikey': supabaseKey,
-           'authorization': `Bearer ${supabaseKey}`,
-           'content-type': 'application/json',
-           'cache-control': 'no-cache',
-           'pragma': 'no-cache',
-           'range': '0-9'
-       };
-       console.log('Fetching from Supabase with headers:', headers);
-       const response = await fetch(`${supabaseUrl}/rest/v1/public?select=*`, {
-           method: 'GET',
-           headers: headers
-       });
+       console.log(`Fetching from GitHub: ${domainsJsonUrl}`);
+       const response = await fetch(domainsJsonUrl);
        if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
-       const data = await response.json();
+       const configSite = await response.json(); // Directly get the site data object
        siteList.innerHTML = '';
-       if (data && data.length > 0) {
-           const configSite = data[0].data;
+       if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object
            totalSites = Object.keys(configSite).length;
            completedSites = 0;
            let latestUpdate = new Date(0);
@@ -239,7 +223,7 @@ async function loadSiteData() {
        document.getElementById('last-update-time').textContent = formattedDate;
    } else {
        siteList.innerHTML = '<div class="no-sites">No sites available</div>';
-       updateStatusIndicator('Ready', 'No sites found in database', 100);
+       updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
    }
} catch (error) {
    console.error('Errore:', error);


@@ -75,9 +75,24 @@ jobs:
            executable: StreamingCommunity_linux_previous
            separator: ':'
+         # ARM64 build
+         - os: ubuntu-latest
+           artifact_name: StreamingCommunity_linux_arm64
+           executable: StreamingCommunity_linux_arm64
+           separator: ':'
+           architecture: arm64

    runs-on: ${{ matrix.os }}
+   # For ARM64, set architecture if present
+   defaults:
+     run:
+       shell: bash

    steps:
+     - name: Set up QEMU (for ARM64)
+       if: ${{ matrix.architecture == 'arm64' }}
+       uses: docker/setup-qemu-action@v3
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
@@ -94,6 +109,7 @@ jobs:
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'
+         architecture: ${{ matrix.architecture || 'x64' }}

      - name: Install dependencies
        run: |
@@ -122,6 +138,8 @@ jobs:
            --hidden-import=Cryptodome.Util --hidden-import=Cryptodome.Util.Padding \
            --hidden-import=Cryptodome.Random \
            --hidden-import=telebot \
+           --hidden-import=curl_cffi --hidden-import=_cffi_backend \
+           --collect-all curl_cffi \
            --additional-hooks-dir=pyinstaller/hooks \
            --add-data "StreamingCommunity${{ matrix.separator }}StreamingCommunity" \
            --name=${{ matrix.artifact_name }} test_run.py


@@ -16,12 +16,12 @@ jobs:
      - name: Count Lines of Code
        run: |
          LOC=$(cloc . --json | jq '.SUM.code')
-         echo "{\"schemaVersion\": 1, \"label\": \"Lines of Code\", \"message\": \"$LOC\", \"color\": \"green\"}" > .github/media/loc-badge.json
+         echo "{\"schemaVersion\": 1, \"label\": \"Lines of Code\", \"message\": \"$LOC\", \"color\": \"green\"}" > .github/.domain/loc-badge.json

      - name: Commit and Push LOC Badge
        run: |
          git config --local user.name "GitHub Actions"
          git config --local user.email "actions@github.com"
-         git add .github/media/loc-badge.json
+         git add .github/.domain/loc-badge.json
          git commit -m "Update lines of code badge" || echo "No changes to commit"
          git push

.github/workflows/update_domain.yml (new vendored file)

@@ -0,0 +1,58 @@
name: Update domains (Amend Strategy)

on:
  schedule:
    - cron: "0 7-21 * * *"
  workflow_dispatch:

jobs:
  update-domains:
    runs-on: ubuntu-latest
    permissions:
      contents: write

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Needed for the amend
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Install dependencies
        run: |
          pip install httpx tldextract ua-generator dnspython
          pip install --upgrade pip setuptools wheel

      - name: Configure DNS
        run: |
          sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
          cat /etc/resolv.conf

      - name: Execute domain update script
        run: python .github/.domain/domain_update.py

      - name: Always amend last commit
        run: |
          git config --global user.name 'github-actions[bot]'
          git config --global user.email 'github-actions[bot]@users.noreply.github.com'

          if ! git diff --quiet .github/.domain/domains.json; then
            echo "📝 Changes detected - amending last commit"
            git add .github/.domain/domains.json
            git commit --amend --no-edit
            git push --force-with-lease origin main
          else
            echo "✅ No changes to domains.json"
          fi

      - name: Verify repository state
        if: failure()
        run: |
          echo "❌ Something went wrong. Repository state:"
          git log --oneline -5
          git status


@@ -25,7 +25,7 @@
    <img src="https://img.shields.io/pypi/dm/streamingcommunity?style=for-the-badge" alt="PyPI Downloads"/>
  </a>
  <a href="https://github.com/Arrowar/StreamingCommunity">
-   <img src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/Arrowar/StreamingCommunity/main/.github/media/loc-badge.json&style=for-the-badge" alt="Lines of Code"/>
+   <img src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/Arrowar/StreamingCommunity/main/.github/.domain/loc-badge.json&style=for-the-badge" alt="Lines of Code"/>
  </a>
</p>
@@ -518,7 +518,7 @@ To enable qBittorrent integration, follow the setup guide [here](https://github.
    "download_subtitle": true,
    "merge_subs": true,
    "specific_list_subtitles": [
-       "ita",
+       "ita", // Specify language codes or use ["*"] to download all available subtitles
        "eng"
    ],
    "cleanup_tmp_folder": true
@@ -544,6 +544,8 @@ To enable qBittorrent integration, follow the setup guide [here](https://github.
  - `download_subtitle`: Whether to download subtitles
  - `merge_subs`: Whether to merge subtitles with video
  - `specific_list_subtitles`: List of subtitle languages to download
+   * Use `["*"]` to download all available subtitles
+   * Or specify individual languages like `["ita", "eng"]`
    * Can be changed with `--specific_list_subtitles ita,eng`

#### Cleanup
@@ -812,9 +814,6 @@ Addon per Stremio che consente lo streaming HTTPS di film, serie, anime e TV in
### 🧩 [streamingcommunity-unofficialapi](https://github.com/Blu-Tiger/streamingcommunity-unofficialapi)
API non ufficiale per accedere ai contenuti del sito italiano StreamingCommunity.

-### 🎥 [stream-buddy](https://github.com/Bbalduzz/stream-buddy)
-Tool per guardare o scaricare film dalla piattaforma StreamingCommunity.

# Disclaimer
This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.


@@ -5,9 +5,9 @@ import logging

# External libraries
-import httpx
import jsbeautifier
from bs4 import BeautifulSoup
+from curl_cffi import requests

# Internal utilities
@@ -28,7 +28,6 @@ class VideoSource:
        - url (str): The URL of the video source.
        """
        self.headers = get_headers()
-       self.client = httpx.Client()
        self.url = url

    def make_request(self, url: str) -> str:
@@ -42,8 +41,10 @@ class VideoSource:
        - str: The response content if successful, None otherwise.
        """
        try:
-           response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
-           response.raise_for_status()
+           response = requests.get(url, headers=self.headers, timeout=MAX_TIMEOUT, impersonate="chrome110")
+           if response.status_code >= 400:
+               logging.error(f"Request failed with status code: {response.status_code}")
+               return None
            return response.text
        except Exception as e:


@@ -39,6 +39,7 @@ class VideoSource:
        self.is_series = is_series
        self.media_id = media_id
        self.iframe_src = None
+       self.window_parameter = None

    def get_iframe(self, episode_id: int) -> None:
        """
@@ -109,41 +110,45 @@ class VideoSource:
            # Parse script to get video information
            self.parse_script(script_text=script)

+       except httpx.HTTPStatusError as e:
+           if e.response.status_code == 404:
+               console.print("[yellow]This content will be available soon![/yellow]")
+               return
+
+           logging.error(f"Error getting content: {e}")
+           raise
+
        except Exception as e:
            logging.error(f"Error getting content: {e}")
            raise

-   def get_playlist(self) -> str:
+   def get_playlist(self) -> str | None:
        """
        Generate authenticated playlist URL.

        Returns:
-           str: Fully constructed playlist URL with authentication parameters
+           str | None: Fully constructed playlist URL with authentication parameters, or None if content unavailable
        """
+       if not self.window_parameter:
+           return None
+
        params = {}

-       # Add 'h' parameter if video quality is 1080p
        if self.canPlayFHD:
            params['h'] = 1

-       # Parse the original URL
        parsed_url = urlparse(self.window_parameter.url)
        query_params = parse_qs(parsed_url.query)

-       # Check specifically for 'b=1' in the query parameters
        if 'b' in query_params and query_params['b'] == ['1']:
            params['b'] = 1

-       # Add authentication parameters (token and expiration)
        params.update({
            "token": self.window_parameter.token,
            "expires": self.window_parameter.expires
        })

-       # Build the updated query string
        query_string = urlencode(params)

-       # Construct the new URL with updated query parameters
        return urlunparse(parsed_url._replace(query=query_string))


@@ -61,16 +61,22 @@ def download_film(select_title: MediaItem) -> str:
    # Extract mostraguarda URL
    try:
        response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
+       response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        iframes = soup.find_all('iframe')
        mostraguarda = iframes[0]['src']

    except Exception as e:
        console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda")
+       return None

    # Extract supervideo URL
+   supervideo_url = None
    try:
        response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
+       response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+'
        supervideo_match = re.search(pattern, response.text)
@@ -78,7 +84,9 @@ def download_film(select_title: MediaItem) -> str:
    except Exception as e:
        console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL")
+       console.print("[yellow]This content will be available soon![/yellow]")
+       return None

    # Init class
    video_source = VideoSource(supervideo_url)
    master_playlist = video_source.get_playlist()


@@ -38,38 +38,52 @@ class GetSerieInfo:
        soup = BeautifulSoup(response.text, "html.parser")
        self.series_name = soup.find("title").get_text(strip=True).split(" - ")[0]

-       # Process all seasons
-       season_items = soup.find_all('div', class_='accordion-item')
-
-       for season_idx, season_item in enumerate(season_items, 1):
-           season_header = season_item.find('div', class_='accordion-header')
-           if not season_header:
-               continue
-
-           season_name = season_header.get_text(strip=True)
+       # Find all season dropdowns
+       seasons_dropdown = soup.find('div', class_='dropdown seasons')
+       if not seasons_dropdown:
+           return
+
+       # Get all season items
+       season_items = seasons_dropdown.find_all('span', {'data-season': True})
+       for season_item in season_items:
+           season_num = int(season_item['data-season'])
+           season_name = season_item.get_text(strip=True)

-           # Create a new season and get a reference to it
+           # Create a new season
            current_season = self.seasons_manager.add_season({
-               'number': season_idx,
+               'number': season_num,
                'name': season_name
            })

-           # Find episodes for this season
-           episode_divs = season_item.find_all('div', class_='down-episode')
-           for ep_idx, ep_div in enumerate(episode_divs, 1):
-               episode_name_tag = ep_div.find('b')
-               if not episode_name_tag:
-                   continue
-
-               episode_name = episode_name_tag.get_text(strip=True)
-               link_tag = ep_div.find('a', string=lambda text: text and "Supervideo" in text)
-               episode_url = link_tag['href'] if link_tag else None
+           # Find all episodes for this season
+           episodes_container = soup.find('div', {'class': 'dropdown mirrors', 'data-season': str(season_num)})
+           if not episodes_container:
+               continue
+
+           # Get all episode mirrors for this season
+           episode_mirrors = soup.find_all('div', {'class': 'dropdown mirrors',
+                                                   'data-season': str(season_num)})
+
+           for mirror in episode_mirrors:
+               episode_data = mirror.get('data-episode', '').split('-')
+               if len(episode_data) != 2:
+                   continue
+
+               ep_num = int(episode_data[1])
+
+               # Find supervideo link
+               supervideo_span = mirror.find('span', {'data-id': 'supervideo'})
+               if not supervideo_span:
+                   continue
+
+               episode_url = supervideo_span.get('data-link', '')

                # Add episode to the season
                if current_season:
                    current_season.episodes.add({
-                       'number': ep_idx,
-                       'name': episode_name,
+                       'number': ep_num,
+                       'name': f"Episodio {ep_num}",
                        'url': episode_url
                    })


@@ -62,6 +62,10 @@ def download_film(select_title: MediaItem, proxy: str = None) -> str:
    video_source.get_content()
    master_playlist = video_source.get_playlist()

+   if master_playlist is None:
+       console.print(f"[red]Site: {site_constant.SITE_NAME}, error: No master playlist found[/red]")
+       return None
+
    # Define the filename and path for the downloaded film
    title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))


@@ -180,10 +180,14 @@ class M3U8Manager:
        self.sub_streams = []

        if ENABLE_SUBTITLE:
-           self.sub_streams = [
-               s for s in (self.parser._subtitle.get_all_uris_and_names() or [])
-               if s.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE
-           ]
+           if "*" in DOWNLOAD_SPECIFIC_SUBTITLE:
+               self.sub_streams = self.parser._subtitle.get_all_uris_and_names() or []
+           else:
+               self.sub_streams = [
+                   s for s in (self.parser._subtitle.get_all_uris_and_names() or [])
+                   if s.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE
+               ]

    def log_selection(self):
        tuple_available_resolution = self.parser._video.get_list_resolution()
@@ -209,9 +213,13 @@ class M3U8Manager:
            f"[red]Set:[/red] {set_codec_info}"
        )

+       # Get available subtitles and their languages
        available_subtitles = self.parser._subtitle.get_all_uris_and_names() or []
        available_sub_languages = [sub.get('language') for sub in available_subtitles]
-       downloadable_sub_languages = list(set(available_sub_languages) & set(DOWNLOAD_SPECIFIC_SUBTITLE))
+
+       # If "*" is in DOWNLOAD_SPECIFIC_SUBTITLE, all languages are downloadable
+       downloadable_sub_languages = available_sub_languages if "*" in DOWNLOAD_SPECIFIC_SUBTITLE else list(set(available_sub_languages) & set(DOWNLOAD_SPECIFIC_SUBTITLE))

        if available_sub_languages:
            console.print(
                f"[cyan bold]Subtitle [/cyan bold] [green]Available:[/green] [purple]{', '.join(available_sub_languages)}[/purple] | "


@@ -4,6 +4,7 @@ import os
import sys
import time
import asyncio
+import importlib.metadata

# External library
import httpx
@@ -11,7 +12,7 @@ from rich.console import Console

# Internal utilities
-from .version import __version__, __author__, __title__
+from .version import __version__ as source_code_version, __author__, __title__
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
@@ -75,7 +76,11 @@ def update():
    percentual_stars = 0

    # Get the current version (installed version)
-   current_version = __version__
+   try:
+       current_version = importlib.metadata.version(__title__)
+   except importlib.metadata.PackageNotFoundError:
+       #console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]")
+       current_version = source_code_version

    # Get commit details
    latest_commit = response_commits[0] if response_commits else None


@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
-__version__ = '3.0.8'
+__version__ = '3.0.9'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
-__copyright__ = 'Copyright 2024'
+__copyright__ = 'Copyright 2025'


@@ -268,33 +268,32 @@ class ConfigManager:
            self._load_site_data_from_file()

    def _load_site_data_from_api(self) -> None:
-       """Load site data from API."""
+       """Load site data from GitHub."""
+       domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
        headers = {
-           "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
-           "Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
-           "Content-Type": "application/json",
            "User-Agent": get_userAgent()
        }

        try:
-           console.print("[bold cyan]Retrieving site data from API...[/bold cyan]")
-           response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers)
+           console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
+           response = requests.get(domains_github_url, timeout=8, headers=headers)

            if response.ok:
-               data = response.json()
-               if data and len(data) > 0:
-                   self.configSite = data[0]['data']
-
-                   site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
-
-               else:
-                   console.print("[bold yellow]API returned an empty data set[/bold yellow]")
+               self.configSite = response.json()
+
+               site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+               console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")

            else:
-               console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
+               console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
                self._handle_site_data_fallback()

+       except json.JSONDecodeError as e:
+           console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}")
+           self._handle_site_data_fallback()
+
        except Exception as e:
-           console.print(f"[bold red]API connection error:[/bold red] {str(e)}")
+           console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
            self._handle_site_data_fallback()

    def _load_site_data_from_file(self) -> None:


@@ -12,7 +12,7 @@ import inspect
import subprocess
import contextlib
import importlib.metadata
+import socket

# External library
from unidecode import unidecode
@@ -283,37 +283,64 @@ class InternManager():
        else:
            return f"{bytes / (1024 * 1024):.2f} MB/s"

-   def check_dns_provider(self):
-       """
-       Check if the system's current DNS server matches any known DNS providers.
-
-       Returns:
-           bool: True if the current DNS server matches a known provider,
-                 False if no match is found or in case of errors
-       """
-       dns_providers = {
-           "Cloudflare": ["1.1.1.1", "1.0.0.1"],
-           "Google": ["8.8.8.8", "8.8.4.4"],
-           "OpenDNS": ["208.67.222.222", "208.67.220.220"],
-           "Quad9": ["9.9.9.9", "149.112.112.112"],
-       }
-
-       try:
-           resolver = dns.resolver.Resolver()
-           nameservers = resolver.nameservers
-
-           if not nameservers:
-               return False
-
-           for server in nameservers:
-               for provider, ips in dns_providers.items():
-                   if server in ips:
-                       return True
-           return False
-
-       except Exception:
-           return False
+   # def check_dns_provider(self):
+   #     """
+   #     Check if the system's current DNS server matches any known DNS providers.
+
+   #     Returns:
+   #         bool: True if the current DNS server matches a known provider,
+   #               False if no match is found or in case of errors
+   #     """
+   #     dns_providers = {
+   #         "Cloudflare": ["1.1.1.1", "1.0.0.1"],
+   #         "Google": ["8.8.8.8", "8.8.4.4"],
+   #         "OpenDNS": ["208.67.222.222", "208.67.220.220"],
+   #         "Quad9": ["9.9.9.9", "149.112.112.112"],
+   #         "AdGuard": ["94.140.14.14", "94.140.15.15"],
+   #         "Comodo": ["8.26.56.26", "8.20.247.20"],
+   #         "Level3": ["209.244.0.3", "209.244.0.4"],
+   #         "Norton": ["199.85.126.10", "199.85.127.10"],
+   #         "CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
+   #         "Yandex": ["77.88.8.8", "77.88.8.1"]
+   #     }
+
+   #     try:
+   #         resolver = dns.resolver.Resolver()
+   #         nameservers = resolver.nameservers
+
+   #         if not nameservers:
+   #             return False
+
+   #         for server in nameservers:
+   #             for provider, ips in dns_providers.items():
+   #                 if server in ips:
+   #                     return True
+   #         return False
+
+   #     except Exception:
+   #         return False
+
+   def check_dns_resolve(self, domains_list: list = None):
+       """
+       Check if the system's current DNS server can resolve a domain name.
+       Works on both Windows and Unix-like systems.
+
+       Args:
+           domains_list (list, optional): List of domains to test. Defaults to common domains.
+
+       Returns:
+           bool: True if the current DNS server can resolve a domain name,
+                 False if can't resolve or in case of errors
+       """
+       test_domains = domains_list or ["github.com", "google.com", "microsoft.com", "amazon.com"]
+
+       try:
+           for domain in test_domains:
+               # socket.gethostbyname() works consistently across all platforms
+               socket.gethostbyname(domain)
+           return True
+       except (socket.gaierror, socket.error):
+           return False

class OsSummary:
    def __init__(self):


@@ -157,7 +157,7 @@ def global_search(search_terms: str = None, selected_sites: list = None):
    # Display progress information
    console.print(f"\n[bold green]Searching for:[/bold green] [yellow]{search_terms}[/yellow]")
-   console.print(f"[bold green]Searching across:[/bold green] {len(selected_sites)} sites")
+   console.print(f"[bold green]Searching across:[/bold green] {len(selected_sites)} sites \n")

    with Progress() as progress:
        search_task = progress.add_task("[cyan]Searching...", total=len(selected_sites))
@@ -188,7 +188,7 @@ def global_search(search_terms: str = None, selected_sites: list = None):
                item_dict['source_alias'] = alias
                all_results[alias].append(item_dict)

-           console.print(f"[green]Found {len(database.media_list)} results from {site_name}")
+           console.print(f"\n[green]Found {len(database.media_list)} results from {site_name}")

        except Exception as e:
            console.print(f"[bold red]Error searching {site_name}:[/bold red] {str(e)}")


@@ -9,6 +9,7 @@ import platform
import argparse
import importlib
import threading, asyncio
+from urllib.parse import urlparse
from typing import Callable
@@ -153,6 +154,7 @@ def initialize():
    except:
        console.log("[red]Error with loading github.")

def restart_script():
    """Riavvia lo script con gli stessi argomenti della riga di comando."""
    print("\nRiavvio dello script...\n")
@@ -191,6 +193,11 @@ def force_exit():
    os._exit(0)

+def _extract_hostname(url_string: str) -> str:
+   """Safely extracts the hostname from a URL string."""
+   return urlparse(url_string).hostname
+
def main(script_id = 0):
    color_map = {
@@ -209,8 +216,11 @@ def main(script_id = 0):
    # Create logger
    log_not = Logger()
    initialize()

-   if not internet_manager.check_dns_provider():
+   # Get all site hostname
+   hostname_list = [hostname for site_info in config_manager.configSite.values() if (hostname := _extract_hostname(site_info.get('full_url')))]
+
+   if not internet_manager.check_dns_resolve(hostname_list):
        print()
        console.print("[red]❌ ERROR: DNS configuration is required!")
        console.print("[red]The program cannot function correctly without proper DNS settings.")
@@ -219,8 +229,7 @@ def main(script_id = 0):
        console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
        console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
-       time.sleep(2)
-       msg.ask("[yellow]Press Enter to continue ...")
+       os._exit(0)

    # Load search functions
    search_functions = load_search_functions()
@@ -363,4 +372,4 @@ def main(script_id = 0):
    # Delete script_id
    script_id = TelegramSession.get_session()
    if script_id != "unknown":
        TelegramSession.deleteScriptId(script_id)


@@ -6,6 +6,7 @@ m3u8
certifi
psutil
unidecode
+curl_cffi
dnspython
jsbeautifier
pathvalidate
@@ -14,3 +15,4 @@ ua-generator
qbittorrent-api
pyTelegramBotAPI
PyQt5
+beautifulsoup4


@@ -1,4 +1,5 @@
import os
+import re
from setuptools import setup, find_packages

def read_readme():
@@ -8,9 +9,21 @@ def read_readme():
with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f:
    required_packages = f.read().splitlines()

+def get_version():
+    try:
+        import pkg_resources
+        return pkg_resources.get_distribution('StreamingCommunity').version
+    except:
+        version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py")
+        with open(version_file_path, "r", encoding="utf-8") as f:
+            version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
+            if version_match:
+                return version_match.group(1)
+        raise RuntimeError("Unable to find version string in StreamingCommunity/Upload/version.py.")
+
setup(
    name="StreamingCommunity",
-   version="3.0.8",
+   version=get_version(),
    long_description=read_readme(),
    long_description_content_type="text/markdown",
    author="Lovi-0",
@@ -29,4 +42,4 @@ setup(
        "Bug Reports": "https://github.com/Lovi-0/StreamingCommunity/issues",
        "Source": "https://github.com/Lovi-0/StreamingCommunity",
    }
)