';
+ updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
}
} catch (error) {
        console.error('Error:', error);
@@ -171,6 +233,10 @@ async function loadSiteData() {
`;
+ if (statusIndicator) {
+ updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0);
+ statusIndicator.querySelector('.status-icon').style.background = '#f44336';
+ }
}
}
diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml
new file mode 100644
index 0000000..231c795
--- /dev/null
+++ b/.github/workflows/update_domain.yml
@@ -0,0 +1,49 @@
+name: Update domains
+
+on:
+ schedule:
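+    # Runs every two hours, on the hour (GitHub Actions cron times are UTC)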
+ - cron: "0 */2 * * *"
+ workflow_dispatch:
+
+jobs:
+ update-domains:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: |
+          pip install --upgrade pip setuptools wheel
+          pip install httpx ua-generator requests
+
+ - name: Configure DNS
+ run: |
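+          # Override the runner's default resolver with Quad9 (9.9.9.9); assumed to give more reliable answers for the domain checks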
+ sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
+ cat /etc/resolv.conf
+
+ - name: Execute domain update script
+ run: python .github/.domain/domain_update.py
+
+ - name: Commit and push changes (if any)
+ run: |
+ git config --global user.name 'github-actions[bot]'
+ git config --global user.email 'github-actions[bot]@users.noreply.github.com'
+
+ # Check if domains.json was modified
+ if ! git diff --quiet .github/.domain/domains.json; then
+ git add .github/.domain/domains.json
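+            # "[skip ci]" keeps this bot commit from triggering further workflow runs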
+ git commit -m "Automatic domain update [skip ci]"
+ echo "Changes committed. Attempting to push..."
+ git push
+ else
+ echo "No changes to .github/.domain/domains.json to commit."
+ fi
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 5cf4a3f..9322c75 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,5 +52,4 @@ cmd.txt
bot_config.json
scripts.json
active_requests.json
-domains.json
working_proxies.json
\ No newline at end of file
diff --git a/README.md b/README.md
index 7ff53c4..0780427 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
diff --git a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
index 0dd36a7..d46f635 100644
--- a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
@@ -31,7 +31,8 @@ class ScrapSerie:
self.client = httpx.Client(
cookies={"sessionId": self.session_id},
headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
- base_url=full_url
+ base_url=full_url,
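+            # NOTE: disables TLS certificate verification (assumed workaround for the site's certificate issues)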
+ verify=False
)
try:
diff --git a/StreamingCommunity/Api/Site/raiplay/__init__.py b/StreamingCommunity/Api/Site/raiplay/__init__.py
index d1b7e23..816d753 100644
--- a/StreamingCommunity/Api/Site/raiplay/__init__.py
+++ b/StreamingCommunity/Api/Site/raiplay/__init__.py
@@ -21,7 +21,7 @@ from .film import download_film
# Variable
indice = 5
_useFor = "Film_&_Serie"
-_priority = 1 # NOTE: Site search need the use of tmbd obj
+_priority = 0
_engineDownload = "hls"
_deprecate = False
diff --git a/StreamingCommunity/Api/Site/raiplay/site.py b/StreamingCommunity/Api/Site/raiplay/site.py
index c4a4b1e..ef95cbc 100644
--- a/StreamingCommunity/Api/Site/raiplay/site.py
+++ b/StreamingCommunity/Api/Site/raiplay/site.py
@@ -1,9 +1,5 @@
# 21.05.24
-import threading
-import queue
-
-
# External libraries
import httpx
from rich.console import Console
@@ -13,12 +9,9 @@ from rich.console import Console
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
-from StreamingCommunity.Lib.TMBD.tmdb import tmdb
-
-
-# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+from .util.ScrapeSerie import GetSerieInfo
# Variable
@@ -26,76 +19,33 @@ console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
-MAX_THREADS = 12
-def determine_media_type(title):
+def determine_media_type(item):
"""
- Use TMDB to determine if a title is a movie or TV show.
+    Determine whether the item is a film or a TV series by checking the actual
+    season count via GetSerieInfo.
"""
try:
- # First search as a movie
- movie_results = tmdb._make_request("search/movie", {"query": title})
- movie_count = len(movie_results.get("results", []))
-
- # Then search as a TV show
- tv_results = tmdb._make_request("search/tv", {"query": title})
- tv_count = len(tv_results.get("results", []))
-
- # If results found in only one category, use that
- if movie_count > 0 and tv_count == 0:
- return "film"
- elif tv_count > 0 and movie_count == 0:
- return "tv"
-
- # If both have results, compare popularity
- if movie_count > 0 and tv_count > 0:
- top_movie = movie_results["results"][0]
- top_tv = tv_results["results"][0]
-
- return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv"
+ # Extract program name from path_id
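+    # e.g. a path_id of "/programmi/<name>.json" would yield "<name>" (path format assumed)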
+ program_name = None
+ if item.get('path_id'):
+ parts = item['path_id'].strip('/').split('/')
+ if len(parts) >= 2:
+ program_name = parts[-1].split('.')[0]
- return "film"
+ if not program_name:
+ return "film"
+
+ scraper = GetSerieInfo(program_name)
+ scraper.collect_info_title()
+ return "tv" if scraper.getNumberSeason() > 0 else "film"
except Exception as e:
- console.log(f"Error determining media type with TMDB: {e}")
+ console.print(f"[red]Error determining media type: {e}[/red]")
return "film"
-def worker_determine_type(work_queue, result_dict, worker_id):
- """
- Worker function to process items from queue and determine media types.
-
- Parameters:
- - work_queue: Queue containing items to process
- - result_dict: Dictionary to store results
- - worker_id: ID of the worker thread
- """
- while not work_queue.empty():
- try:
- index, item = work_queue.get(block=False)
- title = item.get('titolo', '')
- media_type = determine_media_type(title)
-
- result_dict[index] = {
- 'id': item.get('id', ''),
- 'name': title,
- 'type': media_type,
- 'path_id': item.get('path_id', ''),
- 'url': f"https://www.raiplay.it{item.get('url', '')}",
- 'image': f"https://www.raiplay.it{item.get('immagine', '')}",
- }
-
- work_queue.task_done()
-
- except queue.Empty:
- break
-
- except Exception as e:
- console.log(f"Worker {worker_id} error: {e}")
- work_queue.task_done()
-
-
def title_search(query: str) -> int:
"""
Search for titles based on a search query.
@@ -141,33 +91,15 @@ def title_search(query: str) -> int:
data = response.json().get('agg').get('titoli').get('cards')
data = data[:15] if len(data) > 15 else data
- # Use multithreading to determine media types in parallel
- work_queue = queue.Queue()
- result_dict = {}
-
- # Add items to the work queue
- for i, item in enumerate(data):
- work_queue.put((i, item))
-
- # Create and start worker threads
- threads = []
- for i in range(min(MAX_THREADS, len(data))):
- thread = threading.Thread(
- target=worker_determine_type,
- args=(work_queue, result_dict, i),
- daemon=True
- )
- threads.append(thread)
- thread.start()
-
- # Wait for all threads to complete
- for thread in threads:
- thread.join()
-
- # Add all results to media manager in correct order
- for i in range(len(data)):
- if i in result_dict:
- media_search_manager.add_media(result_dict[i])
+ # Process each item and add to media manager
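+    # NOTE: each result is classified sequentially with its own GetSerieInfo lookup; the list is capped at 15 items above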
+ for item in data:
+ media_search_manager.add_media({
+ 'id': item.get('id', ''),
+ 'name': item.get('titolo', ''),
+ 'type': determine_media_type(item),
+ 'path_id': item.get('path_id', ''),
+ 'url': f"https://www.raiplay.it{item.get('url', '')}",
+ 'image': f"https://www.raiplay.it{item.get('immagine', '')}",
+ })
- # Return the number of titles found
return media_search_manager.get_length()
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
index d54ec1f..b7bd863 100644
--- a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
@@ -30,28 +30,48 @@ class GetSerieInfo:
try:
program_url = f"{self.base_url}/programmi/{self.program_name}.json"
response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
+
+ # If 404, content is not yet available
+ if response.status_code == 404:
+ logging.info(f"Content not yet available: {self.program_name}")
+ return
+
response.raise_for_status()
-
json_data = response.json()
# Look for seasons in the 'blocks' property
- for block in json_data.get('blocks'):
- if block.get('type') == 'RaiPlay Multimedia Block' and block.get('name', '').lower() == 'episodi':
- self.publishing_block_id = block.get('id')
-
- # Extract seasons from sets array
- for season_set in block.get('sets', []):
- if 'stagione' in season_set.get('name', '').lower():
- self.seasons_manager.add_season({
- 'id': season_set.get('id', ''),
- 'number': len(self.seasons_manager.seasons) + 1,
- 'name': season_set.get('name', ''),
- 'path': season_set.get('path_id', ''),
- 'episodes_count': season_set.get('episode_size', {}).get('number', 0)
- })
+ for block in json_data.get('blocks', []):
- except Exception as e:
+ # Check if block is a season block or episodi block
+ if block.get('type') == 'RaiPlay Multimedia Block':
+ if block.get('name', '').lower() == 'episodi':
+ self.publishing_block_id = block.get('id')
+
+ # Extract seasons from sets array
+ for season_set in block.get('sets', []):
+ if 'stagione' in season_set.get('name', '').lower():
+ self._add_season(season_set, block.get('id'))
+
+ elif 'stagione' in block.get('name', '').lower():
+ self.publishing_block_id = block.get('id')
+
+ # Extract season directly from block's sets
+ for season_set in block.get('sets', []):
+ self._add_season(season_set, block.get('id'))
+
+ except httpx.HTTPError as e:
logging.error(f"Error collecting series info: {e}")
+ except Exception as e:
+ logging.error(f"Unexpected error collecting series info: {e}")
+
+ def _add_season(self, season_set: dict, block_id: str):
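+        """Append a season entry to the seasons manager; numbering follows insertion order (block_id is currently unused)."""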
+ self.seasons_manager.add_season({
+ 'id': season_set.get('id', ''),
+ 'number': len(self.seasons_manager.seasons) + 1,
+ 'name': season_set.get('name', ''),
+ 'path': season_set.get('path_id', ''),
+ 'episodes_count': season_set.get('episode_size', {}).get('number', 0)
+ })
def collect_info_season(self, number_season: int) -> None:
"""Get episodes for a specific season."""
diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py
index a2df4df..535de5b 100644
--- a/StreamingCommunity/Upload/version.py
+++ b/StreamingCommunity/Upload/version.py
@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
-__version__ = '3.0.7'
+__version__ = '3.0.8'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
__copyright__ = 'Copyright 2024'
diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py
index bea1edc..62f68a4 100644
--- a/StreamingCommunity/Util/config_json.py
+++ b/StreamingCommunity/Util/config_json.py
@@ -39,9 +39,6 @@ class ConfigManager:
# Get the actual path of the module file
current_file_path = os.path.abspath(__file__)
- # Navigate upwards to find the project root
- # Assuming this file is in a package structure like StreamingCommunity/Util/config_json.py
- # We need to go up 2 levels to reach the project root
base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
# Initialize file paths
@@ -271,33 +268,32 @@ class ConfigManager:
self._load_site_data_from_file()
def _load_site_data_from_api(self) -> None:
- """Load site data from API."""
+ """Load site data from GitHub."""
+ domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
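+        # domains.json is refreshed periodically by the update_domain.yml workflow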
headers = {
- "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
- "Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
- "Content-Type": "application/json",
- "User-Agent": get_userAgent()
+ "User-Agent": get_userAgent()
}
try:
- console.print("[bold cyan]Retrieving site data from API...[/bold cyan]")
- response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers)
+ console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
+ response = requests.get(domains_github_url, timeout=8, headers=headers)
if response.ok:
- data = response.json()
- if data and len(data) > 0:
- self.configSite = data[0]['data']
-
- site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
-
- else:
- console.print("[bold yellow]API returned an empty data set[/bold yellow]")
+ self.configSite = response.json()
+
+ site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+ console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")
+
else:
- console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
+ console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
self._handle_site_data_fallback()
+ except json.JSONDecodeError as e:
+ console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}")
+ self._handle_site_data_fallback()
+
except Exception as e:
- console.print(f"[bold red]API connection error:[/bold red] {str(e)}")
+ console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
self._handle_site_data_fallback()
def _load_site_data_from_file(self) -> None:
@@ -562,7 +558,6 @@ class ConfigManager:
return section in config_source
-# Helper function to check the platform
def get_use_large_bar():
"""
Determine if the large bar feature should be enabled.
diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py
index 87e8249..f98ba46 100644
--- a/StreamingCommunity/Util/os.py
+++ b/StreamingCommunity/Util/os.py
@@ -12,7 +12,7 @@ import inspect
import subprocess
import contextlib
import importlib.metadata
-
+import socket
# External library
from unidecode import unidecode
@@ -323,11 +323,24 @@ class InternManager():
def check_dns_resolve(self):
"""
Check if the system's current DNS server can resolve a domain name.
+ Works on both Windows and Unix-like systems.
Returns:
bool: True if the current DNS server can resolve a domain name,
False if can't resolve or in case of errors
"""
+ test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]
+
+ try:
+ for domain in test_domains:
+ # socket.gethostbyname() works consistently across all platforms
+ socket.gethostbyname(domain)
+ return True
+ except (socket.gaierror, socket.error):
+ return False
+
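+        # NOTE: the dns.resolver-based check below is unreachable, since every path above returns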
try:
resolver = dns.resolver.Resolver()
# Simple DNS resolution test - will raise an exception if it fails
diff --git a/setup.py b/setup.py
index 5a63b87..1fe021a 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", enco
setup(
name="StreamingCommunity",
- version="3.0.7",
+ version="3.0.8",
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Lovi-0",