From ef6c8c9cb3450f5c25480b7f73bcaf668078bf93 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sun, 25 May 2025 15:37:53 +0200 Subject: [PATCH 01/10] api: Fix tipo raiplay --- .github/.site/css/style.css | 525 ++++++++---------- .github/.site/js/script.js | 154 +++-- README.md | 2 +- .../Api/Site/animeworld/util/ScrapeSerie.py | 3 +- .../Api/Site/raiplay/__init__.py | 2 +- StreamingCommunity/Api/Site/raiplay/site.py | 122 +--- .../Api/Site/raiplay/util/ScrapeSerie.py | 52 +- StreamingCommunity/Util/config_json.py | 4 - StreamingCommunity/Util/os.py | 6 - 9 files changed, 408 insertions(+), 462 deletions(-) diff --git a/.github/.site/css/style.css b/.github/.site/css/style.css index 35d95af..33d50fc 100644 --- a/.github/.site/css/style.css +++ b/.github/.site/css/style.css @@ -38,14 +38,11 @@ body { flex-direction: column; } -header { - background-color: var(--header-bg); - backdrop-filter: blur(10px); - position: fixed; - width: 100%; - padding: 15px 0; - z-index: 1000; - box-shadow: 0 2px 12px var(--shadow-color); +.container { + max-width: 1400px; + margin: 0 auto; + padding: 20px; + flex: 1; } .header-container { @@ -88,13 +85,6 @@ header { font-size: 1.1rem; } -.container { - max-width: 1400px; - margin: 0 auto; - padding: 20px; - flex: 1; -} - .site-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); @@ -166,78 +156,6 @@ header { color: var(--accent-color); } -.site-content { - text-align: center; - width: 100%; -} - -.domain { - color: var(--text-color); - opacity: 0.8; - font-size: 0.9rem; - margin-bottom: 1.5rem; - word-break: break-all; -} - -.site-item a { - margin-top: 1rem; - background: linear-gradient(135deg, var(--primary-color), var(--secondary-color)); - color: white; - text-decoration: none; - font-weight: 500; - padding: 12px 28px; - border-radius: 8px; - width: fit-content; - margin: 0 auto; - display: flex; - align-items: center; - gap: 8px; -} - -.site-item a:hover { - opacity: 0.9; - transform: translateY(-2px); -} - -.site-title { - opacity: 0; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - background: rgba(0, 0, 0, 0.8); - padding: 10px 20px; - border-radius: 8px; - transition: opacity 0.3s ease; - color: white; - font-size: 1.2rem; - text-align: center; - width: 80%; - pointer-events: none; - z-index: 2; -} - -.site-item:hover .site-title { - opacity: 1; -} - -.site-item::after { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: rgba(0, 0, 0, 0.5); - opacity: 0; - transition: opacity 0.3s ease; - pointer-events: none; -} - -.site-item:hover::after { - opacity: 1; -} - .site-info { display: flex; flex-direction: column; @@ -264,6 +182,211 @@ header { opacity: 1; } +.site-status { + position: absolute; + top: 10px; + right: 10px; + width: 12px; + height: 12px; + border-radius: 50%; + background: #4CAF50; +} + +.site-status.offline { + background: #f44336; +} + +.status-indicator { + position: fixed; + top: 20px; + right: 20px; + background: var(--card-background); + border: 1px solid var(--border-color); + border-radius: 12px; + padding: 15px 20px; + box-shadow: 0 4px 20px var(--shadow-color); + z-index: 1001; + min-width: 280px; + max-width: 400px; + transition: all 0.3s ease; +} + +.status-indicator.hidden { + opacity: 0; + transform: translateY(-20px); + pointer-events: none; +} + +.status-header { + display: flex; + align-items: center; + gap: 10px; + margin-bottom: 15px; + font-weight: 600; + color: 
var(--primary-color); +} + +.status-icon { + width: 20px; + height: 20px; + border: 2px solid var(--primary-color); + border-radius: 50%; + border-top-color: transparent; + animation: spin 1s linear infinite; +} + +.status-icon.ready { + border: none; + background: #4CAF50; + animation: none; + position: relative; +} + +.status-icon.ready::after { + content: '✓'; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + color: white; + font-size: 12px; + font-weight: bold; +} + +@keyframes spin { + 0% { transform: rotate(0deg); } + 100% { transform: rotate(360deg); } +} + +.status-text { + color: var(--text-color); + font-size: 0.9rem; + margin-bottom: 10px; +} + +.checking-sites { + max-height: 200px; + overflow-y: auto; + background: var(--background-color); + border-radius: 8px; + padding: 10px; + border: 1px solid var(--border-color); +} + +.checking-site { + display: flex; + align-items: center; + justify-content: between; + gap: 10px; + padding: 6px 8px; + margin-bottom: 4px; + border-radius: 6px; + background: var(--card-background); + font-size: 0.8rem; + color: var(--text-color); + transition: all 0.2s ease; +} + +.checking-site.completed { + opacity: 0.6; + background: var(--card-hover); +} + +.checking-site.online { + border-left: 3px solid #4CAF50; +} + +.checking-site.offline { + border-left: 3px solid #f44336; +} + +.checking-site .site-name { + flex: 1; + font-weight: 500; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.checking-site .site-status-icon { + width: 12px; + height: 12px; + border-radius: 50%; + flex-shrink: 0; +} + +.checking-site .site-status-icon.checking { + background: var(--primary-color); + animation: pulse 1s infinite; +} + +.checking-site .site-status-icon.online { + background: #4CAF50; +} + +.checking-site .site-status-icon.offline { + background: #f44336; +} + +@keyframes pulse { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.5; } +} + +.progress-bar { + width: 100%; + height: 6px; + background: var(--background-color); + border-radius: 3px; + overflow: hidden; + margin-top: 10px; +} + +.progress-fill { + height: 100%; + background: linear-gradient(90deg, var(--primary-color), var(--accent-color)); + width: 0%; + transition: width 0.3s ease; + border-radius: 3px; +} + +.loader { + width: 48px; + height: 48px; + border: 3px solid var(--primary-color); + border-bottom-color: transparent; + border-radius: 50%; + display: inline-block; + position: relative; + box-sizing: border-box; + animation: rotation 1s linear infinite; +} + +.loader::after { + content: ''; + position: absolute; + box-sizing: border-box; + left: 0; + top: 0; + width: 48px; + height: 48px; + border-radius: 50%; + border: 3px solid transparent; + border-bottom-color: var(--accent-color); + animation: rotationBack 0.5s linear infinite; + transform: rotate(45deg); +} + +@keyframes rotation { + 0% { transform: rotate(0deg) } + 100% { transform: rotate(360deg) } +} + +@keyframes rotationBack { + 0% { transform: rotate(0deg) } + 100% { transform: rotate(-360deg) } +} + footer { background: var(--card-background); border-top: 1px solid var(--border-color); @@ -355,26 +478,6 @@ footer { transform: scale(1.2); } -.github-stats { - display: flex; - gap: 10px; - margin-top: 10px; - font-size: 0.8rem; -} - -.github-badge { - background-color: var(--background-color); - padding: 4px 8px; - border-radius: 4px; - display: flex; - align-items: center; - gap: 4px; -} - -.github-badge i { - color: var(--accent-color); -} - .footer-description { 
margin-top: 15px; font-size: 0.9rem; @@ -383,103 +486,13 @@ footer { line-height: 1.5; } -.update-info { - text-align: center; - margin-top: 30px; - padding-top: 30px; - border-top: 1px solid var(--border-color); -} - .update-note { color: var(--accent-color); font-size: 0.9rem; opacity: 0.9; } -.theme-toggle { - position: relative; - top: unset; - right: unset; - z-index: 1; -} - -.theme-toggle input { - display: none; -} - -.theme-toggle label { - cursor: pointer; - padding: 8px; - background: var(--background-color); - border-radius: 50%; - display: flex; - align-items: center; - justify-content: center; - box-shadow: 0 0 10px var(--shadow-color); - border: 1px solid var(--border-color); - transition: all 0.3s ease; -} - -.theme-toggle label:hover { - border-color: var(--primary-color); - transform: translateY(-2px); -} - -.theme-toggle .fa-sun { - display: none; - color: #ffd700; -} - -.theme-toggle .fa-moon { - color: #8c52ff; -} - -.theme-toggle input:checked ~ label .fa-sun { - display: block; -} - -.theme-toggle input:checked ~ label .fa-moon { - display: none; -} - -.loader { - width: 48px; - height: 48px; - border: 3px solid var(--primary-color); - border-bottom-color: transparent; - border-radius: 50%; - display: inline-block; - position: relative; - box-sizing: border-box; - animation: rotation 1s linear infinite; -} - -.loader::after { - content: ''; - position: absolute; - box-sizing: border-box; - left: 0; - top: 0; - width: 48px; - height: 48px; - border-radius: 50%; - border: 3px solid transparent; - border-bottom-color: var(--accent-color); - animation: rotationBack 0.5s linear infinite; - transform: rotate(45deg); -} - -@keyframes rotation { - 0% { transform: rotate(0deg) } - 100% { transform: rotate(360deg) } -} - -@keyframes rotationBack { - 0% { transform: rotate(0deg) } - 100% { transform: rotate(-360deg) } -} - -/* Improved Responsiveness */ +/* Responsiveness */ @media (max-width: 768px) { .site-grid { grid-template-columns: repeat(auto-fill, minmax(250px, 1fr)); @@ -496,11 +509,7 @@ footer { grid-template-columns: 1fr; gap: 20px; padding: 15px; - } - - .theme-toggle { - top: 10px; - right: 10px; + text-align: center; } .header-container { @@ -517,27 +526,6 @@ footer { width: 100%; justify-content: center; } -} - -@media (max-width: 480px) { - .site-grid { - grid-template-columns: 1fr; - } - - .site-item { - min-height: 220px; - } - - .container { - padding: 10px; - } -} - -@media (max-width: 768px) { - .footer-content { - grid-template-columns: 1fr; - text-align: center; - } .footer-title::after { left: 50%; @@ -557,83 +545,16 @@ footer { } } -.time-change { - color: var(--text-color); - opacity: 0.7; - font-size: 0.85rem; - margin-bottom: 0.5rem; - word-break: break-all; -} +@media (max-width: 480px) { + .site-grid { + grid-template-columns: 1fr; + } -.label { - color: var(--accent-color); - font-weight: 500; -} - -.controls-container { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 20px; - padding: 15px 20px; - background: var(--card-background); - border-radius: 12px; - border: 1px solid var(--border-color); -} - -.grid-controls { - display: flex; - align-items: center; - gap: 10px; -} - -.grid-controls label { - color: var(--text-color); - font-weight: 500; -} - -.grid-controls select { - padding: 8px 12px; - border-radius: 8px; - border: 1px solid var(--border-color); - background: var(--background-color); - color: var(--text-color); - cursor: pointer; - transition: all 0.3s ease; -} - -.grid-controls select:hover { 
- border-color: var(--primary-color); -} - -.sites-stats { - display: flex; - gap: 20px; - align-items: center; -} - -.total-sites, .last-update-global { - display: flex; - align-items: center; - gap: 8px; - color: var(--text-color); - font-size: 0.9rem; -} - -.total-sites i, .last-update-global i { - color: var(--primary-color); -} - -.site-status { - position: absolute; - top: 10px; - right: 10px; - width: 12px; - height: 12px; - border-radius: 50%; - background: #4CAF50; -} - -.site-status.offline { - background: #f44336; + .site-item { + min-height: 220px; + } + + .container { + padding: 10px; + } } \ No newline at end of file diff --git a/.github/.site/js/script.js b/.github/.site/js/script.js index 5a9f34c..e89eb9f 100644 --- a/.github/.site/js/script.js +++ b/.github/.site/js/script.js @@ -1,32 +1,82 @@ document.documentElement.setAttribute('data-theme', 'dark'); -function initGridControls() { - const gridSize = document.getElementById('grid-size'); - const siteGrid = document.querySelector('.site-grid'); - - gridSize.addEventListener('change', function() { - switch(this.value) { - case 'small': - siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(200px, 1fr))'; - break; - case 'medium': - siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(300px, 1fr))'; - break; - case 'large': - siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(400px, 1fr))'; - break; - } - localStorage.setItem('preferredGridSize', this.value); - }); +let statusIndicator = null; +let checkingSites = new Map(); +let totalSites = 0; +let completedSites = 0; - const savedSize = localStorage.getItem('preferredGridSize'); - if (savedSize) { - gridSize.value = savedSize; - gridSize.dispatchEvent(new Event('change')); +function createStatusIndicator() { + statusIndicator = document.createElement('div'); + statusIndicator.className = 'status-indicator'; + statusIndicator.innerHTML = ` +
+        <div class="status-header">
+            <div class="status-icon"></div>
+            <span class="status-title">Loading Sites...</span>
+        </div>
+        <div class="status-text">Initializing site checks...</div>
+        <div class="checking-sites"></div>
+        <div class="progress-bar">
+            <div class="progress-fill"></div>
+        </div>
+ `; + document.body.appendChild(statusIndicator); + return statusIndicator; +} + +function updateStatusIndicator(status, text, progress = 0) { + if (!statusIndicator) return; + + const statusIcon = statusIndicator.querySelector('.status-icon'); + const statusTitle = statusIndicator.querySelector('.status-title'); + const statusText = statusIndicator.querySelector('.status-text'); + const progressFill = statusIndicator.querySelector('.progress-fill'); + + statusTitle.textContent = status; + statusText.textContent = text; + progressFill.style.width = `${progress}%`; + + if (status === 'Ready') { + statusIcon.classList.add('ready'); + setTimeout(() => { + statusIndicator.classList.add('hidden'); + setTimeout(() => statusIndicator.remove(), 300); + }, 2000); } } -async function checkSiteStatus(url) { +function addSiteToCheck(siteName, siteUrl) { + if (!statusIndicator) return; + + const checkingSitesContainer = statusIndicator.querySelector('.checking-sites'); + const siteElement = document.createElement('div'); + siteElement.className = 'checking-site'; + siteElement.innerHTML = ` + ${siteName} +
+ `; + checkingSitesContainer.appendChild(siteElement); + checkingSites.set(siteName, siteElement); +} + +function updateSiteStatus(siteName, isOnline) { + const siteElement = checkingSites.get(siteName); + if (!siteElement) return; + + const statusIcon = siteElement.querySelector('.site-status-icon'); + statusIcon.classList.remove('checking'); + statusIcon.classList.add(isOnline ? 'online' : 'offline'); + siteElement.classList.add('completed', isOnline ? 'online' : 'offline'); + + completedSites++; + const progress = (completedSites / totalSites) * 100; + updateStatusIndicator( + 'Checking Sites...', + `Checked ${completedSites}/${totalSites} sites`, + progress + ); +} + +async function checkSiteStatus(url, siteName) { try { console.log(`Checking status for: ${url}`); const controller = new AbortController(); @@ -46,9 +96,19 @@ async function checkSiteStatus(url) { const isOnline = response.type === 'opaque'; console.log(`Site ${url} is ${isOnline ? 'online' : 'offline'} (Type: ${response.type})`); + + if (siteName) { + updateSiteStatus(siteName, isOnline); + } + return isOnline; } catch (error) { console.log(`Error checking ${url}:`, error.message); + + if (siteName) { + updateSiteStatus(siteName, false); + } + return false; } } @@ -59,9 +119,12 @@ const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS async function loadSiteData() { try { console.log('Starting to load site data...'); + + createStatusIndicator(); + updateStatusIndicator('Loading...', 'Fetching site data from database...', 0); + const siteList = document.getElementById('site-list'); - siteList.innerHTML = '
'; - + const headers = { 'accept': '*/*', 'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7', @@ -83,29 +146,41 @@ async function loadSiteData() { const data = await response.json(); - siteList.innerHTML = ''; if (data && data.length > 0) { - console.log('Raw data from Supabase:', data); + siteList.innerHTML = ''; + + if (data && data.length > 0) { const configSite = data[0].data; - console.log('Parsed config site:', configSite); - let totalSites = Object.keys(configSite).length; + totalSites = Object.keys(configSite).length; + completedSites = 0; let latestUpdate = new Date(0); document.getElementById('sites-count').textContent = totalSites; + + updateStatusIndicator('Checking Sites...', `Starting checks for ${totalSites} sites...`, 0); + + Object.entries(configSite).forEach(([siteName, site]) => { + addSiteToCheck(siteName, site.full_url); + }); - for (const siteName in configSite) { - const site = configSite[siteName]; + const statusChecks = Object.entries(configSite).map(async ([siteName, site]) => { + const isOnline = await checkSiteStatus(site.full_url, siteName); + return { siteName, site, isOnline }; + }); + + const results = await Promise.all(statusChecks); + + updateStatusIndicator('Ready', 'All sites checked successfully!', 100); + + results.forEach(({ siteName, site, isOnline }) => { const siteItem = document.createElement('div'); siteItem.className = 'site-item'; siteItem.style.cursor = 'pointer'; - // Add status indicator const statusDot = document.createElement('div'); statusDot.className = 'site-status'; - const isOnline = await checkSiteStatus(site.full_url); if (!isOnline) statusDot.classList.add('offline'); siteItem.appendChild(statusDot); - // Update latest update time const updateTime = new Date(site.time_change); if (updateTime > latestUpdate) { latestUpdate = updateTime; @@ -133,7 +208,9 @@ async function loadSiteData() { oldDomain.className = 'old-domain'; oldDomain.innerHTML = ` ${site.old_domain}`; siteInfo.appendChild(oldDomain); - } siteItem.addEventListener('click', function() { + } + + siteItem.addEventListener('click', function() { window.open(site.full_url, '_blank', 'noopener,noreferrer'); }); @@ -150,7 +227,7 @@ async function loadSiteData() { siteItem.appendChild(siteTitle); siteItem.appendChild(siteInfo); siteList.appendChild(siteItem); - } + }); const formattedDate = latestUpdate.toLocaleDateString('it-IT', { year: 'numeric', @@ -162,6 +239,7 @@ async function loadSiteData() { document.getElementById('last-update-time').textContent = formattedDate; } else { siteList.innerHTML = '
<div>No sites available</div>
'; + updateStatusIndicator('Ready', 'No sites found in database', 100); } } catch (error) { console.error('Errore:', error); @@ -171,6 +249,10 @@ async function loadSiteData() { `; + if (statusIndicator) { + updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0); + statusIndicator.querySelector('.status-icon').style.background = '#f44336'; + } } } diff --git a/README.md b/README.md index 7ff53c4..0780427 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

- Project Logo + Project Logo

diff --git a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py index 0dd36a7..d46f635 100644 --- a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py +++ b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py @@ -31,7 +31,8 @@ class ScrapSerie: self.client = httpx.Client( cookies={"sessionId": self.session_id}, headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token}, - base_url=full_url + base_url=full_url, + verify=False ) try: diff --git a/StreamingCommunity/Api/Site/raiplay/__init__.py b/StreamingCommunity/Api/Site/raiplay/__init__.py index d1b7e23..816d753 100644 --- a/StreamingCommunity/Api/Site/raiplay/__init__.py +++ b/StreamingCommunity/Api/Site/raiplay/__init__.py @@ -21,7 +21,7 @@ from .film import download_film # Variable indice = 5 _useFor = "Film_&_Serie" -_priority = 1 # NOTE: Site search need the use of tmbd obj +_priority = 0 _engineDownload = "hls" _deprecate = False diff --git a/StreamingCommunity/Api/Site/raiplay/site.py b/StreamingCommunity/Api/Site/raiplay/site.py index c4a4b1e..ef95cbc 100644 --- a/StreamingCommunity/Api/Site/raiplay/site.py +++ b/StreamingCommunity/Api/Site/raiplay/site.py @@ -1,9 +1,5 @@ # 21.05.24 -import threading -import queue - - # External libraries import httpx from rich.console import Console @@ -13,12 +9,9 @@ from rich.console import Console from StreamingCommunity.Util.config_json import config_manager from StreamingCommunity.Util.headers import get_userAgent from StreamingCommunity.Util.table import TVShowManager -from StreamingCommunity.Lib.TMBD.tmdb import tmdb - - -# Logic class from StreamingCommunity.Api.Template.config_loader import site_constant from StreamingCommunity.Api.Template.Class.SearchType import MediaManager +from .util.ScrapeSerie import GetSerieInfo # Variable @@ -26,76 +19,33 @@ console = Console() media_search_manager = MediaManager() table_show_manager = TVShowManager() max_timeout = config_manager.get_int("REQUESTS", "timeout") -MAX_THREADS = 12 -def determine_media_type(title): +def determine_media_type(item): """ - Use TMDB to determine if a title is a movie or TV show. + Determine if the item is a film or TV series by checking actual seasons count + using GetSerieInfo. 
""" try: - # First search as a movie - movie_results = tmdb._make_request("search/movie", {"query": title}) - movie_count = len(movie_results.get("results", [])) - - # Then search as a TV show - tv_results = tmdb._make_request("search/tv", {"query": title}) - tv_count = len(tv_results.get("results", [])) - - # If results found in only one category, use that - if movie_count > 0 and tv_count == 0: - return "film" - elif tv_count > 0 and movie_count == 0: - return "tv" - - # If both have results, compare popularity - if movie_count > 0 and tv_count > 0: - top_movie = movie_results["results"][0] - top_tv = tv_results["results"][0] - - return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv" + # Extract program name from path_id + program_name = None + if item.get('path_id'): + parts = item['path_id'].strip('/').split('/') + if len(parts) >= 2: + program_name = parts[-1].split('.')[0] - return "film" + if not program_name: + return "film" + + scraper = GetSerieInfo(program_name) + scraper.collect_info_title() + return "tv" if scraper.getNumberSeason() > 0 else "film" except Exception as e: - console.log(f"Error determining media type with TMDB: {e}") + console.print(f"[red]Error determining media type: {e}[/red]") return "film" -def worker_determine_type(work_queue, result_dict, worker_id): - """ - Worker function to process items from queue and determine media types. - - Parameters: - - work_queue: Queue containing items to process - - result_dict: Dictionary to store results - - worker_id: ID of the worker thread - """ - while not work_queue.empty(): - try: - index, item = work_queue.get(block=False) - title = item.get('titolo', '') - media_type = determine_media_type(title) - - result_dict[index] = { - 'id': item.get('id', ''), - 'name': title, - 'type': media_type, - 'path_id': item.get('path_id', ''), - 'url': f"https://www.raiplay.it{item.get('url', '')}", - 'image': f"https://www.raiplay.it{item.get('immagine', '')}", - } - - work_queue.task_done() - - except queue.Empty: - break - - except Exception as e: - console.log(f"Worker {worker_id} error: {e}") - work_queue.task_done() - - def title_search(query: str) -> int: """ Search for titles based on a search query. 
@@ -141,33 +91,15 @@ def title_search(query: str) -> int: data = response.json().get('agg').get('titoli').get('cards') data = data[:15] if len(data) > 15 else data - # Use multithreading to determine media types in parallel - work_queue = queue.Queue() - result_dict = {} - - # Add items to the work queue - for i, item in enumerate(data): - work_queue.put((i, item)) - - # Create and start worker threads - threads = [] - for i in range(min(MAX_THREADS, len(data))): - thread = threading.Thread( - target=worker_determine_type, - args=(work_queue, result_dict, i), - daemon=True - ) - threads.append(thread) - thread.start() - - # Wait for all threads to complete - for thread in threads: - thread.join() - - # Add all results to media manager in correct order - for i in range(len(data)): - if i in result_dict: - media_search_manager.add_media(result_dict[i]) + # Process each item and add to media manager + for item in data: + media_search_manager.add_media({ + 'id': item.get('id', ''), + 'name': item.get('titolo', ''), + 'type': determine_media_type(item), + 'path_id': item.get('path_id', ''), + 'url': f"https://www.raiplay.it{item.get('url', '')}", + 'image': f"https://www.raiplay.it{item.get('immagine', '')}", + }) - # Return the number of titles found return media_search_manager.get_length() \ No newline at end of file diff --git a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py index d54ec1f..b7bd863 100644 --- a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py +++ b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py @@ -30,28 +30,48 @@ class GetSerieInfo: try: program_url = f"{self.base_url}/programmi/{self.program_name}.json" response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout) + + # If 404, content is not yet available + if response.status_code == 404: + logging.info(f"Content not yet available: {self.program_name}") + return + response.raise_for_status() - json_data = response.json() # Look for seasons in the 'blocks' property - for block in json_data.get('blocks'): - if block.get('type') == 'RaiPlay Multimedia Block' and block.get('name', '').lower() == 'episodi': - self.publishing_block_id = block.get('id') - - # Extract seasons from sets array - for season_set in block.get('sets', []): - if 'stagione' in season_set.get('name', '').lower(): - self.seasons_manager.add_season({ - 'id': season_set.get('id', ''), - 'number': len(self.seasons_manager.seasons) + 1, - 'name': season_set.get('name', ''), - 'path': season_set.get('path_id', ''), - 'episodes_count': season_set.get('episode_size', {}).get('number', 0) - }) + for block in json_data.get('blocks', []): - except Exception as e: + # Check if block is a season block or episodi block + if block.get('type') == 'RaiPlay Multimedia Block': + if block.get('name', '').lower() == 'episodi': + self.publishing_block_id = block.get('id') + + # Extract seasons from sets array + for season_set in block.get('sets', []): + if 'stagione' in season_set.get('name', '').lower(): + self._add_season(season_set, block.get('id')) + + elif 'stagione' in block.get('name', '').lower(): + self.publishing_block_id = block.get('id') + + # Extract season directly from block's sets + for season_set in block.get('sets', []): + self._add_season(season_set, block.get('id')) + + except httpx.HTTPError as e: logging.error(f"Error collecting series info: {e}") + except Exception as e: + logging.error(f"Unexpected error collecting series info: {e}") + + def 
_add_season(self, season_set: dict, block_id: str): + self.seasons_manager.add_season({ + 'id': season_set.get('id', ''), + 'number': len(self.seasons_manager.seasons) + 1, + 'name': season_set.get('name', ''), + 'path': season_set.get('path_id', ''), + 'episodes_count': season_set.get('episode_size', {}).get('number', 0) + }) def collect_info_season(self, number_season: int) -> None: """Get episodes for a specific season.""" diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py index bea1edc..08070cd 100644 --- a/StreamingCommunity/Util/config_json.py +++ b/StreamingCommunity/Util/config_json.py @@ -39,9 +39,6 @@ class ConfigManager: # Get the actual path of the module file current_file_path = os.path.abspath(__file__) - # Navigate upwards to find the project root - # Assuming this file is in a package structure like StreamingCommunity/Util/config_json.py - # We need to go up 2 levels to reach the project root base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path))) # Initialize file paths @@ -562,7 +559,6 @@ class ConfigManager: return section in config_source -# Helper function to check the platform def get_use_large_bar(): """ Determine if the large bar feature should be enabled. diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py index 8da5de1..5e35f03 100644 --- a/StreamingCommunity/Util/os.py +++ b/StreamingCommunity/Util/os.py @@ -296,12 +296,6 @@ class InternManager(): "Google": ["8.8.8.8", "8.8.4.4"], "OpenDNS": ["208.67.222.222", "208.67.220.220"], "Quad9": ["9.9.9.9", "149.112.112.112"], - "AdGuard": ["94.140.14.14", "94.140.15.15"], - "Comodo": ["8.26.56.26", "8.20.247.20"], - "Level3": ["209.244.0.3", "209.244.0.4"], - "Norton": ["199.85.126.10", "199.85.127.10"], - "CleanBrowsing": ["185.228.168.9", "185.228.169.9"], - "Yandex": ["77.88.8.8", "77.88.8.1"] } try: From 86c72937792b2fd695d16d0a4e4c95a73b7fed32 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sun, 25 May 2025 16:59:29 +0200 Subject: [PATCH 02/10] Bump v3.0.8 --- StreamingCommunity/Upload/version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py index a2df4df..535de5b 100644 --- a/StreamingCommunity/Upload/version.py +++ b/StreamingCommunity/Upload/version.py @@ -1,5 +1,5 @@ __title__ = 'StreamingCommunity' -__version__ = '3.0.7' +__version__ = '3.0.8' __author__ = 'Arrowar' __description__ = 'A command-line program to download film' __copyright__ = 'Copyright 2024' diff --git a/setup.py b/setup.py index 5a63b87..1fe021a 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", enco setup( name="StreamingCommunity", - version="3.0.7", + version="3.0.8", long_description=read_readme(), long_description_content_type="text/markdown", author="Lovi-0", From ded66f446e4c8c942fc18e7db801968927a84dc2 Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 10:52:16 +0200 Subject: [PATCH 03/10] Remove database of domain --- .github/.domain/domain_update.py | 253 +++++++++++++++++++++++++ .github/.domain/domains.json | 62 ++++++ .gitignore | 1 - StreamingCommunity/Util/config_json.py | 33 ++-- 4 files changed, 331 insertions(+), 18 deletions(-) create mode 100644 .github/.domain/domain_update.py create mode 100644 .github/.domain/domains.json diff --git 
a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py new file mode 100644 index 0000000..e4be02d --- /dev/null +++ b/.github/.domain/domain_update.py @@ -0,0 +1,253 @@ +# 20.04.2024 + +import os +import re +import time +import json +from datetime import datetime +from urllib.parse import urlparse, urlunparse + + +import httpx +import ua_generator + + +JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json") + + +def load_domains(file_path): + if not os.path.exists(file_path): + print(f"Error: The file {file_path} was not found.") + return None + + try: + with open(file_path, 'r', encoding='utf-8') as f: + return json.load(f) + + except Exception as e: + print(f"Error reading the file {file_path}: {e}") + return None + +def save_domains(file_path, data): + try: + with open(file_path, 'w', encoding='utf-8') as f: + json.dump(data, f, indent=2, ensure_ascii=False) + print(f"Data successfully saved to {file_path}") + + except Exception as e: + print(f"Error saving the file {file_path}: {e}") + +def get_new_tld(full_url): + try: + parsed_url = urlparse(full_url) + hostname = parsed_url.hostname + if hostname: + parts = hostname.split('.') + return parts[-1] + + except Exception: + pass + + return None + +def try_url_with_retries(url_to_try, headers, timeout=15, retries=3, backoff_factor=0.5): + for attempt in range(retries): + try: + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as client: + response = client.get(url_to_try) + response.raise_for_status() + return response + + except (httpx.TimeoutException, httpx.ConnectError) as e: + print(f" [!] Attempt {attempt + 1}/{retries} for {url_to_try}: Network error ({type(e).__name__}). Retrying in {backoff_factor * (2 ** attempt)}s...") + if attempt + 1 == retries: + print(f" [!] Failed all {retries} attempts for {url_to_try} due to {type(e).__name__}.") + return None + time.sleep(backoff_factor * (2 ** attempt)) + + except httpx.HTTPStatusError as http_err: + if http_err.response.status_code in [403, 429, 503]: + print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. Suspected Cloudflare, checking for ...") + try: + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as cf_client: + cf_page_response = cf_client.get(url_to_try) + if cf_page_response.status_code != http_err.response.status_code and not (200 <= cf_page_response.status_code < 300) : + cf_page_response.raise_for_status() + + match = re.search(r': {base_href_url}") + try: + print(f" [] Attempting request to URL: {base_href_url}") + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as base_client: + final_response_from_base = base_client.get(base_href_url) + final_response_from_base.raise_for_status() + print(f" [+] Successfully fetched from URL.") + return final_response_from_base + + except httpx.RequestError as base_req_e: + print(f" [!] Error requesting URL {base_href_url}: {base_req_e}") + return None + + else: + print(f" [!] No found in page content for {url_to_try}.") + return None + + except httpx.RequestError as cf_req_e: + print(f" [!] Error fetching Cloudflare-like page content for {url_to_try}: {cf_req_e}") + return None + + else: + print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. No retry.") + return None + + except httpx.RequestError as e: + print(f" [!] Generic error for {url_to_try}: {e}. 
No retry.") + return None + + return None + + +def update_domain_entries(data): + if not data: + return False + + updated_count = 0 + + for key, entry in data.items(): + print(f"\n--- [DOMAIN] {key} ---") + original_full_url = entry.get("full_url") + original_domain_in_entry = entry.get("domain") + + if not original_full_url: + print(f" [!] 'full_url' missing. Skipped.") + continue + + ua = ua_generator.generate(device=('desktop', 'mobile'), browser=('chrome', 'edge', 'firefox', 'safari')) + current_headers = ua.headers.get() + + print(f" [] Stored URL: {original_full_url}") + if original_domain_in_entry: + print(f" [] Stored Domain (TLD): {original_domain_in_entry}") + + potential_urls_to_try = [] + potential_urls_to_try.append(("Original", original_full_url)) + + try: + parsed_original = urlparse(original_full_url) + + current_netloc = parsed_original.netloc + if current_netloc.startswith("www."): + varied_netloc = current_netloc[4:] + potential_urls_to_try.append(("Without www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) + else: + varied_netloc = "www." + current_netloc + potential_urls_to_try.append(("With www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) + + current_path = parsed_original.path + if not current_path: + potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path='/')))) + elif current_path.endswith('/'): + potential_urls_to_try.append(("Without trailing slash", urlunparse(parsed_original._replace(path=current_path[:-1])))) + else: + potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path=current_path + '/')))) + + except Exception as e: + print(f" [!] Error generating URL variations: {e}") + + entry_updated_in_this_run = False + + seen_urls_for_entry = set() + unique_potential_urls = [] + for label, url_val in potential_urls_to_try: + if url_val not in seen_urls_for_entry: + unique_potential_urls.append((label, url_val)) + seen_urls_for_entry.add(url_val) + + parsed_original_for_http_check = urlparse(original_full_url) + if parsed_original_for_http_check.scheme == 'https': + http_url = urlunparse(parsed_original_for_http_check._replace(scheme='http')) + if http_url not in seen_urls_for_entry: + unique_potential_urls.append(("HTTP Fallback", http_url)) + + for label, url_to_check in unique_potential_urls: + if entry_updated_in_this_run: + break + + print(f" [] Testing URL ({label}): {url_to_check}") + response = try_url_with_retries(url_to_check, current_headers) + + if response: + final_url_from_request = str(response.url) + print(f" [+] Redirect/Response to: {final_url_from_request}") + + parsed_final_url = urlparse(final_url_from_request) + normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) + if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): + normalized_full_url += '/' + + if normalized_full_url != final_url_from_request: + print(f" [+] Normalized URL: {normalized_full_url}") + + if normalized_full_url != original_full_url: + new_tld_val = get_new_tld(final_url_from_request) + + if new_tld_val: + entry["full_url"] = normalized_full_url + + if new_tld_val != original_domain_in_entry: + print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") + entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") + entry["domain"] = new_tld_val + entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print(f" 
[-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") + + else: + entry["domain"] = new_tld_val + print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'") + + updated_count += 1 + entry_updated_in_this_run = True + + else: + print(f" [!] Could not extract TLD from {final_url_from_request}. URL not updated despite potential change.") + else: + if final_url_from_request != original_full_url: + print(f" [] Same Domain (after normalization): {final_url_from_request} -> {normalized_full_url}") + + else: + print(f" [] Same Domain: {final_url_from_request}") + + if label == "Original" or normalized_full_url == original_full_url : + entry_updated_in_this_run = True + + if not entry_updated_in_this_run: + print(f" [-] No Update for {key} after {len(unique_potential_urls)} attempts.") + + return updated_count > 0 + +def main(): + print("Starting domain update script...") + domain_data = load_domains(JSON_FILE_PATH) + + if domain_data: + if update_domain_entries(domain_data): + save_domains(JSON_FILE_PATH, domain_data) + print("\nUpdate complete. Some entries were modified.") + + else: + print("\nUpdate complete. No domains were modified.") + + else: + print("\nCannot proceed without domain data.") + + print("Script finished.") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json new file mode 100644 index 0000000..a7f588a --- /dev/null +++ b/.github/.domain/domains.json @@ -0,0 +1,62 @@ +{ + "1337xx": { + "domain": "to", + "full_url": "https://www.1337xx.to/", + "old_domain": "to", + "time_change": "2025-03-19 12:20:19" + }, + "cb01new": { + "domain": "download", + "full_url": "https://cb01net.download/", + "old_domain": "my", + "time_change": "2025-05-26 22:23:24" + }, + "animeunity": { + "domain": "so", + "full_url": "https://www.animeunity.so/", + "old_domain": "so", + "time_change": "2025-03-19 12:20:23" + }, + "animeworld": { + "domain": "ac", + "full_url": "https://www.animeworld.ac/", + "old_domain": "ac", + "time_change": "2025-03-21 12:20:27" + }, + "guardaserie": { + "domain": "meme", + "full_url": "https://guardaserie.meme/", + "old_domain": "meme", + "time_change": "2025-03-19 12:20:24" + }, + "ddlstreamitaly": { + "domain": "co", + "full_url": "https://ddlstreamitaly.co/", + "old_domain": "co", + "time_change": "2025-03-19 12:20:26" + }, + "streamingwatch": { + "domain": "org", + "full_url": "https://www.streamingwatch.org/", + "old_domain": "org", + "time_change": "2025-04-29 12:30:30" + }, + "altadefinizione": { + "domain": "spa", + "full_url": "https://altadefinizione.spa/", + "old_domain": "locker", + "time_change": "2025-05-26 23:22:45" + }, + "streamingcommunity": { + "domain": "blog", + "full_url": "https://streamingunity.blog/", + "old_domain": "to", + "time_change": "2025-05-31 10:45:55" + }, + "altadefinizionegratis": { + "domain": "icu", + "full_url": "https://altadefinizionegratis.icu/", + "old_domain": "taipei", + "time_change": "2025-05-18 11:21:05" + } +} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5cf4a3f..9322c75 100644 --- a/.gitignore +++ b/.gitignore @@ -52,5 +52,4 @@ cmd.txt bot_config.json scripts.json active_requests.json -domains.json working_proxies.json \ No newline at end of file diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py index 08070cd..62f68a4 100644 --- a/StreamingCommunity/Util/config_json.py +++ 
b/StreamingCommunity/Util/config_json.py @@ -268,33 +268,32 @@ class ConfigManager: self._load_site_data_from_file() def _load_site_data_from_api(self) -> None: - """Load site data from API.""" + """Load site data from GitHub.""" + domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json" headers = { - "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE", - "Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE", - "Content-Type": "application/json", - "User-Agent": get_userAgent() + "User-Agent": get_userAgent() } try: - console.print("[bold cyan]Retrieving site data from API...[/bold cyan]") - response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers) + console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]") + response = requests.get(domains_github_url, timeout=8, headers=headers) if response.ok: - data = response.json() - if data and len(data) > 0: - self.configSite = data[0]['data'] - - site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0 - - else: - console.print("[bold yellow]API returned an empty data set[/bold yellow]") + self.configSite = response.json() + + site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0 + console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.") + else: - console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}") + console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}") self._handle_site_data_fallback() + except json.JSONDecodeError as e: + console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}") + self._handle_site_data_fallback() + except Exception as e: - console.print(f"[bold red]API connection error:[/bold red] {str(e)}") + console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}") self._handle_site_data_fallback() def _load_site_data_from_file(self) -> None: From 71e97c2c65ab52d9b5a89be3e7c8e5e2d8e279eb Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 10:58:12 +0200 Subject: [PATCH 04/10] Site: Update endpoint --- .github/.site/js/script.js | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/.github/.site/js/script.js b/.github/.site/js/script.js index e89eb9f..727e297 100644 --- a/.github/.site/js/script.js +++ b/.github/.site/js/script.js @@ -113,43 +113,27 @@ async function checkSiteStatus(url, siteName) { } } -const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co'; -const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE'; +const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json'; async function loadSiteData() { try { - 
console.log('Starting to load site data...'); + console.log('Starting to load site data from GitHub...'); createStatusIndicator(); - updateStatusIndicator('Loading...', 'Fetching site data from database...', 0); + updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0); const siteList = document.getElementById('site-list'); - const headers = { - 'accept': '*/*', - 'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7', - 'apikey': supabaseKey, - 'authorization': `Bearer ${supabaseKey}`, - 'content-type': 'application/json', - 'cache-control': 'no-cache', - 'pragma': 'no-cache', - 'range': '0-9' - }; - - console.log('Fetching from Supabase with headers:', headers); - const response = await fetch(`${supabaseUrl}/rest/v1/public?select=*`, { - method: 'GET', - headers: headers - }); + console.log(`Fetching from GitHub: ${domainsJsonUrl}`); + const response = await fetch(domainsJsonUrl); if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`); - const data = await response.json(); + const configSite = await response.json(); // Directly get the site data object siteList.innerHTML = ''; - if (data && data.length > 0) { - const configSite = data[0].data; + if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object totalSites = Object.keys(configSite).length; completedSites = 0; let latestUpdate = new Date(0); @@ -239,7 +223,7 @@ async function loadSiteData() { document.getElementById('last-update-time').textContent = formattedDate; } else { siteList.innerHTML = '

<div>No sites available</div>
'; - updateStatusIndicator('Ready', 'No sites found in database', 100); + updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100); } } catch (error) { console.error('Errore:', error); From 884bcf656cad7fbd104f10e95104fb6b9fb82ff8 Mon Sep 17 00:00:00 2001 From: None <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 10:59:11 +0200 Subject: [PATCH 05/10] Create update_domain.yml --- .github/workflows/update_domain.yml | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 .github/workflows/update_domain.yml diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml new file mode 100644 index 0000000..3d7a0bc --- /dev/null +++ b/.github/workflows/update_domain.yml @@ -0,0 +1,48 @@ +name: Aggiorna Domini Periodicamente + +on: + schedule: + - cron: "*/45 * * * *" + workflow_dispatch: + +jobs: + update-domains: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout del codice + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Installa dipendenze + run: pip install httpx ua-generator + + - name: Configura DNS + run: | + sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf' + sudo sh -c 'echo "nameserver 149.112.112.122" >> /etc/resolv.conf' + cat /etc/resolv.conf + + - name: Esegui lo script di aggiornamento domini + run: python domain_updater.py + + - name: Commit e Push delle modifiche (se presenti) + run: | + git config --global user.name 'github-actions[bot]' + git config --global user.email 'github-actions[bot]@users.noreply.github.com' + + # Controlla se domain.json è stato modificato + if ! git diff --quiet domain.json; then + git add domain.json + git commit -m "Aggiornamento automatico domini [skip ci]" + echo "Modifiche committate. Tentativo di push..." + git push + else + echo "Nessuna modifica a domain.json da committare." + fi From 1776538c6c13ece38971ff79a62fc0ca44214cdf Mon Sep 17 00:00:00 2001 From: Lovi <62809003+Arrowar@users.noreply.github.com> Date: Sat, 31 May 2025 11:28:38 +0200 Subject: [PATCH 06/10] github: Update domains --- .github/.domain/domain_update.py | 310 ++++++++++++++-------------- .github/workflows/update_domain.yml | 39 ++-- 2 files changed, 180 insertions(+), 169 deletions(-) diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py index e4be02d..937661f 100644 --- a/.github/.domain/domain_update.py +++ b/.github/.domain/domain_update.py @@ -1,17 +1,14 @@ # 20.04.2024 -import os import re -import time +import os import json from datetime import datetime from urllib.parse import urlparse, urlunparse - import httpx import ua_generator - JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json") @@ -50,69 +47,137 @@ def get_new_tld(full_url): return None -def try_url_with_retries(url_to_try, headers, timeout=15, retries=3, backoff_factor=0.5): - for attempt in range(retries): - try: - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as client: - response = client.get(url_to_try) - response.raise_for_status() - return response - - except (httpx.TimeoutException, httpx.ConnectError) as e: - print(f" [!] Attempt {attempt + 1}/{retries} for {url_to_try}: Network error ({type(e).__name__}). Retrying in {backoff_factor * (2 ** attempt)}s...") - if attempt + 1 == retries: - print(f" [!] 
Failed all {retries} attempts for {url_to_try} due to {type(e).__name__}.") - return None - time.sleep(backoff_factor * (2 ** attempt)) +def extract_domain_from_response(response, original_url): + if 'location' in response.headers: + return response.headers['location'] + + if str(response.url) != original_url: + return str(response.url) + + try: + content_type = response.headers.get('content-type', '').lower() + if 'text/html' in content_type or 'text/plain' in content_type: + response_text = response.text - except httpx.HTTPStatusError as http_err: - if http_err.response.status_code in [403, 429, 503]: - print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. Suspected Cloudflare, checking for ...") - try: - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as cf_client: - cf_page_response = cf_client.get(url_to_try) - if cf_page_response.status_code != http_err.response.status_code and not (200 <= cf_page_response.status_code < 300) : - cf_page_response.raise_for_status() - - match = re.search(r': {base_href_url}") - try: - print(f" [] Attempting request to URL: {base_href_url}") - with httpx.Client(headers=headers, timeout=timeout, follow_redirects=True) as base_client: - final_response_from_base = base_client.get(base_href_url) - final_response_from_base.raise_for_status() - print(f" [+] Successfully fetched from URL.") - return final_response_from_base - - except httpx.RequestError as base_req_e: - print(f" [!] Error requesting URL {base_href_url}: {base_req_e}") - return None - - else: - print(f" [!] No found in page content for {url_to_try}.") - return None - - except httpx.RequestError as cf_req_e: - print(f" [!] Error fetching Cloudflare-like page content for {url_to_try}: {cf_req_e}") - return None - - else: - print(f" [!] HTTP error {http_err.response.status_code} for {url_to_try}. No retry.") - return None + js_redirect_patterns = [ + r'window\.location\.href\s*=\s*["\']([^"\']+)["\']', + r'window\.location\s*=\s*["\']([^"\']+)["\']', + r'location\.href\s*=\s*["\']([^"\']+)["\']', + r'document\.location\s*=\s*["\']([^"\']+)["\']' + ] - except httpx.RequestError as e: - print(f" [!] Generic error for {url_to_try}: {e}. No retry.") - return None - + for pattern in js_redirect_patterns: + js_match = re.search(pattern, response_text, re.IGNORECASE) + if js_match: + return js_match.group(1) + + meta_patterns = [ + r']*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)', + r']*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?' + ] + + for pattern in meta_patterns: + meta_match = re.search(pattern, response_text, re.IGNORECASE) + if meta_match: + return meta_match.group(1) + + canonical_match = re.search(r']*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE) + if canonical_match: + return canonical_match.group(1) + + base_match = re.search(r']*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE) + if base_match: + return base_match.group(1) + + error_redirect_patterns = [ + r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)', + r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)', + r'[Mm]oved\s+to:?\s*([^\s<>"\']+)', + r'[Ff]ound\s+at:?\s*([^\s<>"\']+)' + ] + + for pattern in error_redirect_patterns: + error_match = re.search(pattern, response_text) + if error_match: + potential_url = error_match.group(1) + if potential_url.startswith(('http://', 'https://', '//')): + return potential_url + + except Exception as e: + print(f" [!] 
Error extracting from response content: {e}") + return None +def try_url(url_to_try, headers, timeout=15): + try: + with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as client: + response = client.get(url_to_try) + + if response.status_code in [301, 302, 303, 307, 308]: + location = response.headers.get('location') + if location: + print(f" [+] Found redirect ({response.status_code}) to: {location}") + try: + final_response = client.get(location) + if 200 <= final_response.status_code < 400: + return final_response + else: + return httpx.Response( + status_code=200, + headers={"location": location}, + content=b"", + request=response.request + ) + except Exception: + return httpx.Response( + status_code=200, + headers={"location": location}, + content=b"", + request=response.request + ) + + elif response.status_code in [403, 409, 429, 503]: + print(f" [!] HTTP {response.status_code} - attempting to extract redirect info") + + location = response.headers.get('location') + if location: + print(f" [+] Found location header in error response: {location}") + return httpx.Response( + status_code=200, + headers={"location": location}, + content=b"", + request=response.request + ) + + new_url = extract_domain_from_response(response, url_to_try) + if new_url and new_url != url_to_try: + print(f" [+] Found redirect URL in error response content: {new_url}") + return httpx.Response( + status_code=200, + headers={"location": new_url}, + content=b"", + request=response.request + ) + + if 200 <= response.status_code < 400: + return response + + print(f" [!] HTTP {response.status_code} for {url_to_try}") + + except httpx.HTTPStatusError as http_err: + new_url = extract_domain_from_response(http_err.response, url_to_try) + if new_url: + print(f" [+] Found new URL from HTTPStatusError response: {new_url}") + return httpx.Response( + status_code=200, + headers={"location": new_url}, + content=b"", + request=http_err.request + ) + except Exception as e: + print(f" [!] Error for {url_to_try}: {type(e).__name__}") + + return None def update_domain_entries(data): if not data: @@ -135,100 +200,47 @@ def update_domain_entries(data): print(f" [] Stored URL: {original_full_url}") if original_domain_in_entry: print(f" [] Stored Domain (TLD): {original_domain_in_entry}") - - potential_urls_to_try = [] - potential_urls_to_try.append(("Original", original_full_url)) - - try: - parsed_original = urlparse(original_full_url) - - current_netloc = parsed_original.netloc - if current_netloc.startswith("www."): - varied_netloc = current_netloc[4:] - potential_urls_to_try.append(("Without www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) - else: - varied_netloc = "www." + current_netloc - potential_urls_to_try.append(("With www", urlunparse(parsed_original._replace(netloc=varied_netloc)))) - - current_path = parsed_original.path - if not current_path: - potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path='/')))) - elif current_path.endswith('/'): - potential_urls_to_try.append(("Without trailing slash", urlunparse(parsed_original._replace(path=current_path[:-1])))) - else: - potential_urls_to_try.append(("With trailing slash", urlunparse(parsed_original._replace(path=current_path + '/')))) - - except Exception as e: - print(f" [!] 
Error generating URL variations: {e}") - - entry_updated_in_this_run = False - seen_urls_for_entry = set() - unique_potential_urls = [] - for label, url_val in potential_urls_to_try: - if url_val not in seen_urls_for_entry: - unique_potential_urls.append((label, url_val)) - seen_urls_for_entry.add(url_val) - - parsed_original_for_http_check = urlparse(original_full_url) - if parsed_original_for_http_check.scheme == 'https': - http_url = urlunparse(parsed_original_for_http_check._replace(scheme='http')) - if http_url not in seen_urls_for_entry: - unique_potential_urls.append(("HTTP Fallback", http_url)) + print(f" [] Testing URL: {original_full_url}") + response = try_url(original_full_url, current_headers) - for label, url_to_check in unique_potential_urls: - if entry_updated_in_this_run: - break + if response: + final_url_from_request = str(response.url) + print(f" [+] Redirect/Response to: {final_url_from_request}") + + parsed_final_url = urlparse(final_url_from_request) + normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) + if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): + normalized_full_url += '/' - print(f" [] Testing URL ({label}): {url_to_check}") - response = try_url_with_retries(url_to_check, current_headers) + if normalized_full_url != final_url_from_request: + print(f" [+] Normalized URL: {normalized_full_url}") - if response: - final_url_from_request = str(response.url) - print(f" [+] Redirect/Response to: {final_url_from_request}") - - parsed_final_url = urlparse(final_url_from_request) - normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment='')) - if parsed_final_url.path == '' and not normalized_full_url.endswith('/'): - normalized_full_url += '/' + if normalized_full_url != original_full_url: + new_tld_val = get_new_tld(final_url_from_request) - if normalized_full_url != final_url_from_request: - print(f" [+] Normalized URL: {normalized_full_url}") - - if normalized_full_url != original_full_url: - new_tld_val = get_new_tld(final_url_from_request) + if new_tld_val: + entry["full_url"] = normalized_full_url - if new_tld_val: - entry["full_url"] = normalized_full_url - - if new_tld_val != original_domain_in_entry: - print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") - entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") - entry["domain"] = new_tld_val - entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") - - else: - entry["domain"] = new_tld_val - print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'") - - updated_count += 1 - entry_updated_in_this_run = True - + if new_tld_val != original_domain_in_entry: + print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'") + entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "") + entry["domain"] = new_tld_val + entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'") else: - print(f" [!] Could not extract TLD from {final_url_from_request}. 
diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml
index 3d7a0bc..231c795 100644
--- a/.github/workflows/update_domain.yml
+++ b/.github/workflows/update_domain.yml
@@ -1,8 +1,8 @@
-name: Aggiorna Domini Periodicamente
+name: Update domains

 on:
   schedule:
-    - cron: "*/45 * * * *"
+    - cron: "0 */2 * * *"
   workflow_dispatch:

 jobs:
@@ -12,37 +12,38 @@ jobs:
       contents: write

     steps:
-      - name: Checkout del codice
+      - name: Checkout code
        uses: actions/checkout@v4

       - name: Setup Python
        uses: actions/setup-python@v5
        with:
-          python-version: '3.12'
+          python-version: '3.12'
+
+      - name: Install dependencies
+        run: |
+          pip install httpx ua-generator requests
+          pip install --upgrade pip setuptools wheel

-      - name: Installa dipendenze
-        run: pip install httpx ua-generator
-
-      - name: Configura DNS
+      - name: Configure DNS
        run: |
          sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
-          sudo sh -c 'echo "nameserver 149.112.112.122" >> /etc/resolv.conf'
          cat /etc/resolv.conf

-      - name: Esegui lo script di aggiornamento domini
-        run: python domain_updater.py
+      - name: Execute domain update script
+        run: python .github/.domain/domain_update.py

-      - name: Commit e Push delle modifiche (se presenti)
+      - name: Commit and push changes (if any)
        run: |
          git config --global user.name 'github-actions[bot]'
          git config --global user.email 'github-actions[bot]@users.noreply.github.com'
-          # Controlla se domain.json è stato modificato
-          if ! git diff --quiet domain.json; then
-            git add domain.json
-            git commit -m "Aggiornamento automatico domini [skip ci]"
-            echo "Modifiche committate. Tentativo di push..."
+          # Check if domains.json was modified
+          if ! git diff --quiet .github/.domain/domains.json; then
+            git add .github/.domain/domains.json
+            git commit -m "Automatic domain update [skip ci]"
+            echo "Changes committed. Attempting to push..."
            git push
          else
-            echo "Nessuna modifica a domain.json da committare."
-          fi
+            echo "No changes to .github/.domain/domains.json to commit."
+          fi
\ No newline at end of file
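The commit guard above keys off the exit status of git diff --quiet. For a local dry run of the same check, a short Python equivalent (the path matches the workflow; the snippet itself is hypothetical):

    import subprocess

    # git diff --quiet exits 1 when the file differs from HEAD, 0 when clean
    changed = subprocess.run(
        ["git", "diff", "--quiet", ".github/.domain/domains.json"]
    ).returncode != 0
    print("domains.json changed" if changed else "no changes to commit")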

From 73cc2662b80a2852292d75f90f78ea002106fa3c Mon Sep 17 00:00:00 2001
From: Alessandro Perazzetta <482310+AlessandroPerazzetta@users.noreply.github.com>
Date: Sat, 31 May 2025 11:30:59 +0200
Subject: [PATCH 07/10] Dns check refactor (#328)

* refactor: streamline proxy checking in search function

* refactor: update DNS check method, try a real dns resolution instead of checking dns provider

* refactor: enhance DNS resolution check to support multiple domains across platforms

* refactor: replace os.socket with socket for DNS resolution consistency

---------

Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>
---
 StreamingCommunity/Util/os.py | 74 +++++++++++++++++++++++------------
 StreamingCommunity/run.py     | 17 ++++++--
 2 files changed, 63 insertions(+), 28 deletions(-)

diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py
index 5e35f03..2d8f7d1 100644
--- a/StreamingCommunity/Util/os.py
+++ b/StreamingCommunity/Util/os.py
@@ -12,7 +12,7 @@ import inspect
 import subprocess
 import contextlib
 import importlib.metadata
-
+import socket

 # External library
 from unidecode import unidecode
@@ -283,37 +283,61 @@ class InternManager():
         else:
             return f"{bytes / (1024 * 1024):.2f} MB/s"

-    def check_dns_provider(self):
+    # def check_dns_provider(self):
+    #     """
+    #     Check if the system's current DNS server matches any known DNS providers.

+    #     Returns:
+    #         bool: True if the current DNS server matches a known provider,
+    #               False if no match is found or in case of errors
+    #     """
+    #     dns_providers = {
+    #         "Cloudflare": ["1.1.1.1", "1.0.0.1"],
+    #         "Google": ["8.8.8.8", "8.8.4.4"],
+    #         "OpenDNS": ["208.67.222.222", "208.67.220.220"],
+    #         "Quad9": ["9.9.9.9", "149.112.112.112"],
+    #         "AdGuard": ["94.140.14.14", "94.140.15.15"],
+    #         "Comodo": ["8.26.56.26", "8.20.247.20"],
+    #         "Level3": ["209.244.0.3", "209.244.0.4"],
+    #         "Norton": ["199.85.126.10", "199.85.127.10"],
+    #         "CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
+    #         "Yandex": ["77.88.8.8", "77.88.8.1"]
+    #     }

+    #     try:
+    #         resolver = dns.resolver.Resolver()
+    #         nameservers = resolver.nameservers

+    #         if not nameservers:
+    #             return False

+    #         for server in nameservers:
+    #             for provider, ips in dns_providers.items():
+    #                 if server in ips:
+    #                     return True
+    #         return False

+    #     except Exception:
+    #         return False

+    def check_dns_resolve(self):
         """
-        Check if the system's current DNS server matches any known DNS providers.
+        Check if the system's current DNS server can resolve a domain name.
+        Works on both Windows and Unix-like systems.

         Returns:
-            bool: True if the current DNS server matches a known provider,
-                  False if no match is found or in case of errors
+            bool: True if the current DNS server can resolve a domain name,
+                  False if it cannot resolve, or in case of errors
         """
-        dns_providers = {
-            "Cloudflare": ["1.1.1.1", "1.0.0.1"],
-            "Google": ["8.8.8.8", "8.8.4.4"],
-            "OpenDNS": ["208.67.222.222", "208.67.220.220"],
-            "Quad9": ["9.9.9.9", "149.112.112.112"],
-        }
+        test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]

         try:
-            resolver = dns.resolver.Resolver()
-            nameservers = resolver.nameservers
-
-            if not nameservers:
-                return False
-
-            for server in nameservers:
-                for provider, ips in dns_providers.items():
-                    if server in ips:
-                        return True
+            for domain in test_domains:
+                # socket.gethostbyname() works consistently across all platforms
+                socket.gethostbyname(domain)
+            return True
+        except (socket.gaierror, socket.error):
             return False
-
-        except Exception:
-            return False
-

 class OsSummary:
     def __init__(self):
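The refactored check passes only if every probe domain resolves: a single failure raises socket.gaierror, the same error an ISP-level DNS block produces, and the whole gate returns False. The probe can be reproduced standalone; this sketch mirrors the method under that assumption:

    import socket

    def dns_works(domains=("github.com", "google.com")) -> bool:
        try:
            for domain in domains:
                socket.gethostbyname(domain)  # raises socket.gaierror on NXDOMAIN
            return True
        except socket.gaierror:
            return False

    print("DNS OK" if dns_works() else "DNS blocked or misconfigured")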
diff --git a/StreamingCommunity/run.py b/StreamingCommunity/run.py
index e37bd74..2db8a86 100644
--- a/StreamingCommunity/run.py
+++ b/StreamingCommunity/run.py
@@ -210,7 +210,19 @@ def main(script_id = 0):
     log_not = Logger()
     initialize()

-    if not internet_manager.check_dns_provider():
+    # if not internet_manager.check_dns_provider():
+    #     print()
+    #     console.print("[red]❌ ERROR: DNS configuration is required!")
+    #     console.print("[red]The program cannot function correctly without proper DNS settings.")
+    #     console.print("[yellow]Please configure one of these DNS servers:")
+    #     console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
+    #     console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
+    #     console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")

+    #     time.sleep(2)
+    #     msg.ask("[yellow]Press Enter to continue ...")

+    if not internet_manager.check_dns_resolve():
         print()
         console.print("[red]❌ ERROR: DNS configuration is required!")
         console.print("[red]The program cannot function correctly without proper DNS settings.")
@@ -219,8 +231,7 @@ def main(script_id = 0):
         console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
         console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")

-        time.sleep(2)
-        msg.ask("[yellow]Press Enter to continue ...")
+        os._exit(0)

     # Load search functions
     search_functions = load_search_functions()

From 4b40b8ce225b3ded1c670d171d87f2268de5d4e9 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sat, 31 May 2025 12:17:33 +0000
Subject: [PATCH 08/10] Automatic domain update [skip ci]

---
 .github/.domain/domains.json | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json
index a7f588a..f3ae14d 100644
--- a/.github/.domain/domains.json
+++ b/.github/.domain/domains.json
@@ -48,10 +48,10 @@
         "time_change": "2025-05-26 23:22:45"
     },
     "streamingcommunity": {
-        "domain": "blog",
-        "full_url": "https://streamingunity.blog/",
-        "old_domain": "to",
-        "time_change": "2025-05-31 10:45:55"
+        "domain": "bio",
+        "full_url": "https://streamingunity.bio/",
+        "old_domain": "blog",
+        "time_change": "2025-05-31 12:17:33"
     },
     "altadefinizionegratis": {
         "domain": "icu",
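The bot commit above rewrites entries that all share the four-key shape maintained by domain_update.py. A quick validator sketch; the required-key set is inferred from the hunks in this series, not defined anywhere in the repository:

    import json

    REQUIRED_KEYS = {"domain", "full_url", "old_domain", "time_change"}

    with open(".github/.domain/domains.json", encoding="utf-8") as f:
        domains = json.load(f)

    for name, entry in domains.items():
        missing = REQUIRED_KEYS - entry.keys()
        if missing:
            print(f"{name}: missing {sorted(missing)}")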
From a45fd0d37e85173d79e4486cc5a83b2fe96166c8 Mon Sep 17 00:00:00 2001
From: Alessandro Perazzetta <482310+AlessandroPerazzetta@users.noreply.github.com>
Date: Sat, 31 May 2025 20:07:30 +0200
Subject: [PATCH 09/10] Dns check (#332)

* refactor: streamline proxy checking in search function

* refactor: update DNS check method, try a real dns resolution instead of checking dns provider

* refactor: enhance DNS resolution check to support multiple domains across platforms

* refactor: replace os.socket with socket for DNS resolution consistency

---------

Co-authored-by: None <62809003+Arrowar@users.noreply.github.com>

From b8e28a30c0a58ff74e7fbfab03cf03810421cd90 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sun, 1 Jun 2025 01:02:20 +0000
Subject: [PATCH 10/10] Automatic domain update [skip ci]

---
 .github/.domain/domains.json | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json
index f3ae14d..7cd57d0 100644
--- a/.github/.domain/domains.json
+++ b/.github/.domain/domains.json
@@ -6,10 +6,10 @@
         "time_change": "2025-03-19 12:20:19"
     },
     "cb01new": {
-        "domain": "download",
-        "full_url": "https://cb01net.download/",
-        "old_domain": "my",
-        "time_change": "2025-05-26 22:23:24"
+        "domain": "life",
+        "full_url": "https://cb01net.life/",
+        "old_domain": "download",
+        "time_change": "2025-06-01 01:02:16"
     },
     "animeunity": {
         "domain": "so",