diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py
new file mode 100644
index 0000000..937661f
--- /dev/null
+++ b/.github/.domain/domain_update.py
@@ -0,0 +1,263 @@
+# 20.04.2024
+
+import re
+import os
+import json
+from datetime import datetime
+from urllib.parse import urlparse, urlunparse
+
+import httpx
+import ua_generator
+
+JSON_FILE_PATH = os.path.join(".github", ".domain", "domains.json")
+
+
+def load_domains(file_path):
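+    """Load the domains JSON file; return the parsed dict, or None on error."""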
+ if not os.path.exists(file_path):
+ print(f"Error: The file {file_path} was not found.")
+ return None
+
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ return json.load(f)
+
+ except Exception as e:
+ print(f"Error reading the file {file_path}: {e}")
+ return None
+
+def save_domains(file_path, data):
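+    """Write the (possibly updated) domain data back to the JSON file, pretty-printed."""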
+ try:
+ with open(file_path, 'w', encoding='utf-8') as f:
+ json.dump(data, f, indent=2, ensure_ascii=False)
+ print(f"Data successfully saved to {file_path}")
+
+ except Exception as e:
+ print(f"Error saving the file {file_path}: {e}")
+
+def get_new_tld(full_url):
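+    """Return the last label of the URL's hostname (e.g. 'to' for 'example.to'), or None."""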
+ try:
+ parsed_url = urlparse(full_url)
+ hostname = parsed_url.hostname
+ if hostname:
+ parts = hostname.split('.')
+ return parts[-1]
+
+ except Exception:
+ pass
+
+ return None
+
+def extract_domain_from_response(response, original_url):
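+    """Heuristically extract a redirect target from a response: the Location
+    header, the final URL, JS/meta-refresh redirects, canonical/base tags,
+    or plain-text redirect hints in error pages."""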
+ if 'location' in response.headers:
+ return response.headers['location']
+
+ if str(response.url) != original_url:
+ return str(response.url)
+
+ try:
+ content_type = response.headers.get('content-type', '').lower()
+ if 'text/html' in content_type or 'text/plain' in content_type:
+ response_text = response.text
+
+ js_redirect_patterns = [
+ r'window\.location\.href\s*=\s*["\']([^"\']+)["\']',
+ r'window\.location\s*=\s*["\']([^"\']+)["\']',
+ r'location\.href\s*=\s*["\']([^"\']+)["\']',
+ r'document\.location\s*=\s*["\']([^"\']+)["\']'
+ ]
+
+ for pattern in js_redirect_patterns:
+ js_match = re.search(pattern, response_text, re.IGNORECASE)
+ if js_match:
+ return js_match.group(1)
+
+ meta_patterns = [
+                r'<meta[^>]*http-equiv=["\']?refresh["\']?[^>]*content=["\'][^"\']*url=([^"\'>\s]+)',
+                r'<meta[^>]*content=["\'][^"\']*url=([^"\'>\s]+)[^>]*http-equiv=["\']?refresh["\']?'
+ ]
+
+ for pattern in meta_patterns:
+ meta_match = re.search(pattern, response_text, re.IGNORECASE)
+ if meta_match:
+ return meta_match.group(1)
+
+            canonical_match = re.search(r'<link[^>]*rel=["\']?canonical["\']?[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE)
+ if canonical_match:
+ return canonical_match.group(1)
+
+            base_match = re.search(r'<base[^>]*href=["\']([^"\']+)["\']', response_text, re.IGNORECASE)
+ if base_match:
+ return base_match.group(1)
+
+ error_redirect_patterns = [
+ r'[Rr]edirect(?:ed)?\s+to:?\s*([^\s<>"\']+)',
+ r'[Nn]ew\s+[Uu][Rr][Ll]:?\s*([^\s<>"\']+)',
+ r'[Mm]oved\s+to:?\s*([^\s<>"\']+)',
+ r'[Ff]ound\s+at:?\s*([^\s<>"\']+)'
+ ]
+
+ for pattern in error_redirect_patterns:
+ error_match = re.search(pattern, response_text)
+ if error_match:
+ potential_url = error_match.group(1)
+ if potential_url.startswith(('http://', 'https://', '//')):
+ return potential_url
+
+ except Exception as e:
+ print(f" [!] Error extracting from response content: {e}")
+
+ return None
+
+def try_url(url_to_try, headers, timeout=15):
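+    """Probe a URL without auto-following redirects. Returns the final response,
+    or a synthetic 200 response whose 'location' header carries the discovered
+    redirect target; returns None when nothing useful was found."""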
+ try:
+ with httpx.Client(headers=headers, timeout=timeout, follow_redirects=False) as client:
+ response = client.get(url_to_try)
+
+ if response.status_code in [301, 302, 303, 307, 308]:
+ location = response.headers.get('location')
+ if location:
+ print(f" [+] Found redirect ({response.status_code}) to: {location}")
+ try:
+ final_response = client.get(location)
+ if 200 <= final_response.status_code < 400:
+ return final_response
+ else:
+ return httpx.Response(
+ status_code=200,
+ headers={"location": location},
+ content=b"",
+ request=response.request
+ )
+ except Exception:
+ return httpx.Response(
+ status_code=200,
+ headers={"location": location},
+ content=b"",
+ request=response.request
+ )
+
+ elif response.status_code in [403, 409, 429, 503]:
+ print(f" [!] HTTP {response.status_code} - attempting to extract redirect info")
+
+ location = response.headers.get('location')
+ if location:
+ print(f" [+] Found location header in error response: {location}")
+ return httpx.Response(
+ status_code=200,
+ headers={"location": location},
+ content=b"",
+ request=response.request
+ )
+
+ new_url = extract_domain_from_response(response, url_to_try)
+ if new_url and new_url != url_to_try:
+ print(f" [+] Found redirect URL in error response content: {new_url}")
+ return httpx.Response(
+ status_code=200,
+ headers={"location": new_url},
+ content=b"",
+ request=response.request
+ )
+
+ if 200 <= response.status_code < 400:
+ return response
+
+ print(f" [!] HTTP {response.status_code} for {url_to_try}")
+
+ except httpx.HTTPStatusError as http_err:
+ new_url = extract_domain_from_response(http_err.response, url_to_try)
+ if new_url:
+ print(f" [+] Found new URL from HTTPStatusError response: {new_url}")
+ return httpx.Response(
+ status_code=200,
+ headers={"location": new_url},
+ content=b"",
+ request=http_err.request
+ )
+ except Exception as e:
+ print(f" [!] Error for {url_to_try}: {type(e).__name__}")
+
+ return None
+
+def update_domain_entries(data):
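+    """Re-check every entry's full_url and update 'full_url'/'domain' in place;
+    return True if at least one entry changed."""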
+ if not data:
+ return False
+
+ updated_count = 0
+
+ for key, entry in data.items():
+ print(f"\n--- [DOMAIN] {key} ---")
+ original_full_url = entry.get("full_url")
+ original_domain_in_entry = entry.get("domain")
+
+ if not original_full_url:
+ print(f" [!] 'full_url' missing. Skipped.")
+ continue
+
+ ua = ua_generator.generate(device=('desktop', 'mobile'), browser=('chrome', 'edge', 'firefox', 'safari'))
+ current_headers = ua.headers.get()
+
+ print(f" [] Stored URL: {original_full_url}")
+ if original_domain_in_entry:
+ print(f" [] Stored Domain (TLD): {original_domain_in_entry}")
+
+ print(f" [] Testing URL: {original_full_url}")
+ response = try_url(original_full_url, current_headers)
+
+ if response:
+            # Synthetic responses from try_url carry the discovered URL in the
+            # 'location' header; response.url would still point at the original URL
+            final_url_from_request = response.headers.get('location') or str(response.url)
+ print(f" [+] Redirect/Response to: {final_url_from_request}")
+
+ parsed_final_url = urlparse(final_url_from_request)
+ normalized_full_url = urlunparse(parsed_final_url._replace(path='/', params='', query='', fragment=''))
+ if parsed_final_url.path == '' and not normalized_full_url.endswith('/'):
+ normalized_full_url += '/'
+
+ if normalized_full_url != final_url_from_request:
+ print(f" [+] Normalized URL: {normalized_full_url}")
+
+ if normalized_full_url != original_full_url:
+ new_tld_val = get_new_tld(final_url_from_request)
+
+ if new_tld_val:
+ entry["full_url"] = normalized_full_url
+
+ if new_tld_val != original_domain_in_entry:
+ print(f" [-] Domain TLD Changed: '{original_domain_in_entry}' -> '{new_tld_val}'")
+ entry["old_domain"] = original_domain_in_entry if original_domain_in_entry else entry.get("old_domain", "")
+ entry["domain"] = new_tld_val
+ entry["time_change"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ print(f" [-] Domain & URL Updated: New TLD '{new_tld_val}', New URL '{normalized_full_url}'")
+ else:
+ entry["domain"] = new_tld_val
+ print(f" [-] URL Updated (TLD Unchanged '{new_tld_val}'): New URL '{normalized_full_url}'")
+
+ updated_count += 1
+
+ else:
+ print(f" [!] Could not extract TLD from {final_url_from_request}. URL not updated despite potential change.")
+ else:
+ print(f" [] Same Domain: {final_url_from_request}")
+
+ else:
+ print(f" [-] No response for {key}")
+
+ return updated_count > 0
+
+def main():
+ print("Starting domain update script...")
+ domain_data = load_domains(JSON_FILE_PATH)
+
+ if domain_data:
+ if update_domain_entries(domain_data):
+ save_domains(JSON_FILE_PATH, domain_data)
+ print("\nUpdate complete. Some entries were modified.")
+ else:
+ print("\nUpdate complete. No domains were modified.")
+ else:
+ print("\nCannot proceed without domain data.")
+
+ print("Script finished.")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json
new file mode 100644
index 0000000..a7f588a
--- /dev/null
+++ b/.github/.domain/domains.json
@@ -0,0 +1,62 @@
+{
+ "1337xx": {
+ "domain": "to",
+ "full_url": "https://www.1337xx.to/",
+ "old_domain": "to",
+ "time_change": "2025-03-19 12:20:19"
+ },
+ "cb01new": {
+ "domain": "download",
+ "full_url": "https://cb01net.download/",
+ "old_domain": "my",
+ "time_change": "2025-05-26 22:23:24"
+ },
+ "animeunity": {
+ "domain": "so",
+ "full_url": "https://www.animeunity.so/",
+ "old_domain": "so",
+ "time_change": "2025-03-19 12:20:23"
+ },
+ "animeworld": {
+ "domain": "ac",
+ "full_url": "https://www.animeworld.ac/",
+ "old_domain": "ac",
+ "time_change": "2025-03-21 12:20:27"
+ },
+ "guardaserie": {
+ "domain": "meme",
+ "full_url": "https://guardaserie.meme/",
+ "old_domain": "meme",
+ "time_change": "2025-03-19 12:20:24"
+ },
+ "ddlstreamitaly": {
+ "domain": "co",
+ "full_url": "https://ddlstreamitaly.co/",
+ "old_domain": "co",
+ "time_change": "2025-03-19 12:20:26"
+ },
+ "streamingwatch": {
+ "domain": "org",
+ "full_url": "https://www.streamingwatch.org/",
+ "old_domain": "org",
+ "time_change": "2025-04-29 12:30:30"
+ },
+ "altadefinizione": {
+ "domain": "spa",
+ "full_url": "https://altadefinizione.spa/",
+ "old_domain": "locker",
+ "time_change": "2025-05-26 23:22:45"
+ },
+ "streamingcommunity": {
+ "domain": "blog",
+ "full_url": "https://streamingunity.blog/",
+ "old_domain": "to",
+ "time_change": "2025-05-31 10:45:55"
+ },
+ "altadefinizionegratis": {
+ "domain": "icu",
+ "full_url": "https://altadefinizionegratis.icu/",
+ "old_domain": "taipei",
+ "time_change": "2025-05-18 11:21:05"
+ }
+}
\ No newline at end of file
diff --git a/.github/.site/css/style.css b/.github/.site/css/style.css
index 35d95af..33d50fc 100644
--- a/.github/.site/css/style.css
+++ b/.github/.site/css/style.css
@@ -38,14 +38,11 @@ body {
flex-direction: column;
}
-header {
- background-color: var(--header-bg);
- backdrop-filter: blur(10px);
- position: fixed;
- width: 100%;
- padding: 15px 0;
- z-index: 1000;
- box-shadow: 0 2px 12px var(--shadow-color);
+.container {
+ max-width: 1400px;
+ margin: 0 auto;
+ padding: 20px;
+ flex: 1;
}
.header-container {
@@ -88,13 +85,6 @@ header {
font-size: 1.1rem;
}
-.container {
- max-width: 1400px;
- margin: 0 auto;
- padding: 20px;
- flex: 1;
-}
-
.site-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
@@ -166,78 +156,6 @@ header {
color: var(--accent-color);
}
-.site-content {
- text-align: center;
- width: 100%;
-}
-
-.domain {
- color: var(--text-color);
- opacity: 0.8;
- font-size: 0.9rem;
- margin-bottom: 1.5rem;
- word-break: break-all;
-}
-
-.site-item a {
- margin-top: 1rem;
- background: linear-gradient(135deg, var(--primary-color), var(--secondary-color));
- color: white;
- text-decoration: none;
- font-weight: 500;
- padding: 12px 28px;
- border-radius: 8px;
- width: fit-content;
- margin: 0 auto;
- display: flex;
- align-items: center;
- gap: 8px;
-}
-
-.site-item a:hover {
- opacity: 0.9;
- transform: translateY(-2px);
-}
-
-.site-title {
- opacity: 0;
- position: absolute;
- top: 50%;
- left: 50%;
- transform: translate(-50%, -50%);
- background: rgba(0, 0, 0, 0.8);
- padding: 10px 20px;
- border-radius: 8px;
- transition: opacity 0.3s ease;
- color: white;
- font-size: 1.2rem;
- text-align: center;
- width: 80%;
- pointer-events: none;
- z-index: 2;
-}
-
-.site-item:hover .site-title {
- opacity: 1;
-}
-
-.site-item::after {
- content: '';
- position: absolute;
- top: 0;
- left: 0;
- right: 0;
- bottom: 0;
- background: rgba(0, 0, 0, 0.5);
- opacity: 0;
- transition: opacity 0.3s ease;
- pointer-events: none;
-}
-
-.site-item:hover::after {
- opacity: 1;
-}
-
.site-info {
display: flex;
flex-direction: column;
@@ -264,6 +182,211 @@ header {
opacity: 1;
}
+.site-status {
+ position: absolute;
+ top: 10px;
+ right: 10px;
+ width: 12px;
+ height: 12px;
+ border-radius: 50%;
+ background: #4CAF50;
+}
+
+.site-status.offline {
+ background: #f44336;
+}
+
+.status-indicator {
+ position: fixed;
+ top: 20px;
+ right: 20px;
+ background: var(--card-background);
+ border: 1px solid var(--border-color);
+ border-radius: 12px;
+ padding: 15px 20px;
+ box-shadow: 0 4px 20px var(--shadow-color);
+ z-index: 1001;
+ min-width: 280px;
+ max-width: 400px;
+ transition: all 0.3s ease;
+}
+
+.status-indicator.hidden {
+ opacity: 0;
+ transform: translateY(-20px);
+ pointer-events: none;
+}
+
+.status-header {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ margin-bottom: 15px;
+ font-weight: 600;
+ color: var(--primary-color);
+}
+
+.status-icon {
+ width: 20px;
+ height: 20px;
+ border: 2px solid var(--primary-color);
+ border-radius: 50%;
+ border-top-color: transparent;
+ animation: spin 1s linear infinite;
+}
+
+.status-icon.ready {
+ border: none;
+ background: #4CAF50;
+ animation: none;
+ position: relative;
+}
+
+.status-icon.ready::after {
+ content: '✓';
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+ color: white;
+ font-size: 12px;
+ font-weight: bold;
+}
+
+@keyframes spin {
+ 0% { transform: rotate(0deg); }
+ 100% { transform: rotate(360deg); }
+}
+
+.status-text {
+ color: var(--text-color);
+ font-size: 0.9rem;
+ margin-bottom: 10px;
+}
+
+.checking-sites {
+ max-height: 200px;
+ overflow-y: auto;
+ background: var(--background-color);
+ border-radius: 8px;
+ padding: 10px;
+ border: 1px solid var(--border-color);
+}
+
+.checking-site {
+ display: flex;
+ align-items: center;
+    justify-content: space-between;
+ gap: 10px;
+ padding: 6px 8px;
+ margin-bottom: 4px;
+ border-radius: 6px;
+ background: var(--card-background);
+ font-size: 0.8rem;
+ color: var(--text-color);
+ transition: all 0.2s ease;
+}
+
+.checking-site.completed {
+ opacity: 0.6;
+ background: var(--card-hover);
+}
+
+.checking-site.online {
+ border-left: 3px solid #4CAF50;
+}
+
+.checking-site.offline {
+ border-left: 3px solid #f44336;
+}
+
+.checking-site .site-name {
+ flex: 1;
+ font-weight: 500;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.checking-site .site-status-icon {
+ width: 12px;
+ height: 12px;
+ border-radius: 50%;
+ flex-shrink: 0;
+}
+
+.checking-site .site-status-icon.checking {
+ background: var(--primary-color);
+ animation: pulse 1s infinite;
+}
+
+.checking-site .site-status-icon.online {
+ background: #4CAF50;
+}
+
+.checking-site .site-status-icon.offline {
+ background: #f44336;
+}
+
+@keyframes pulse {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0.5; }
+}
+
+.progress-bar {
+ width: 100%;
+ height: 6px;
+ background: var(--background-color);
+ border-radius: 3px;
+ overflow: hidden;
+ margin-top: 10px;
+}
+
+.progress-fill {
+ height: 100%;
+ background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
+ width: 0%;
+ transition: width 0.3s ease;
+ border-radius: 3px;
+}
+
+.loader {
+ width: 48px;
+ height: 48px;
+ border: 3px solid var(--primary-color);
+ border-bottom-color: transparent;
+ border-radius: 50%;
+ display: inline-block;
+ position: relative;
+ box-sizing: border-box;
+ animation: rotation 1s linear infinite;
+}
+
+.loader::after {
+ content: '';
+ position: absolute;
+ box-sizing: border-box;
+ left: 0;
+ top: 0;
+ width: 48px;
+ height: 48px;
+ border-radius: 50%;
+ border: 3px solid transparent;
+ border-bottom-color: var(--accent-color);
+ animation: rotationBack 0.5s linear infinite;
+ transform: rotate(45deg);
+}
+
+@keyframes rotation {
+ 0% { transform: rotate(0deg) }
+ 100% { transform: rotate(360deg) }
+}
+
+@keyframes rotationBack {
+ 0% { transform: rotate(0deg) }
+ 100% { transform: rotate(-360deg) }
+}
+
footer {
background: var(--card-background);
border-top: 1px solid var(--border-color);
@@ -355,26 +478,6 @@ footer {
transform: scale(1.2);
}
-.github-stats {
- display: flex;
- gap: 10px;
- margin-top: 10px;
- font-size: 0.8rem;
-}
-
-.github-badge {
- background-color: var(--background-color);
- padding: 4px 8px;
- border-radius: 4px;
- display: flex;
- align-items: center;
- gap: 4px;
-}
-
-.github-badge i {
- color: var(--accent-color);
-}
-
.footer-description {
margin-top: 15px;
font-size: 0.9rem;
@@ -383,103 +486,13 @@ footer {
line-height: 1.5;
}
-.update-info {
- text-align: center;
- margin-top: 30px;
- padding-top: 30px;
- border-top: 1px solid var(--border-color);
-}
-
.update-note {
color: var(--accent-color);
font-size: 0.9rem;
opacity: 0.9;
}
-.theme-toggle {
- position: relative;
- top: unset;
- right: unset;
- z-index: 1;
-}
-
-.theme-toggle input {
- display: none;
-}
-
-.theme-toggle label {
- cursor: pointer;
- padding: 8px;
- background: var(--background-color);
- border-radius: 50%;
- display: flex;
- align-items: center;
- justify-content: center;
- box-shadow: 0 0 10px var(--shadow-color);
- border: 1px solid var(--border-color);
- transition: all 0.3s ease;
-}
-
-.theme-toggle label:hover {
- border-color: var(--primary-color);
- transform: translateY(-2px);
-}
-
-.theme-toggle .fa-sun {
- display: none;
- color: #ffd700;
-}
-
-.theme-toggle .fa-moon {
- color: #8c52ff;
-}
-
-.theme-toggle input:checked ~ label .fa-sun {
- display: block;
-}
-
-.theme-toggle input:checked ~ label .fa-moon {
- display: none;
-}
-
-.loader {
- width: 48px;
- height: 48px;
- border: 3px solid var(--primary-color);
- border-bottom-color: transparent;
- border-radius: 50%;
- display: inline-block;
- position: relative;
- box-sizing: border-box;
- animation: rotation 1s linear infinite;
-}
-
-.loader::after {
- content: '';
- position: absolute;
- box-sizing: border-box;
- left: 0;
- top: 0;
- width: 48px;
- height: 48px;
- border-radius: 50%;
- border: 3px solid transparent;
- border-bottom-color: var(--accent-color);
- animation: rotationBack 0.5s linear infinite;
- transform: rotate(45deg);
-}
-
-@keyframes rotation {
- 0% { transform: rotate(0deg) }
- 100% { transform: rotate(360deg) }
-}
-
-@keyframes rotationBack {
- 0% { transform: rotate(0deg) }
- 100% { transform: rotate(-360deg) }
-}
-
-/* Improved Responsiveness */
+/* Responsiveness */
@media (max-width: 768px) {
.site-grid {
grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
@@ -496,11 +509,7 @@ footer {
grid-template-columns: 1fr;
gap: 20px;
padding: 15px;
- }
-
- .theme-toggle {
- top: 10px;
- right: 10px;
+ text-align: center;
}
.header-container {
@@ -517,27 +526,6 @@ footer {
width: 100%;
justify-content: center;
}
-}
-
-@media (max-width: 480px) {
- .site-grid {
- grid-template-columns: 1fr;
- }
-
- .site-item {
- min-height: 220px;
- }
-
- .container {
- padding: 10px;
- }
-}
-
-@media (max-width: 768px) {
- .footer-content {
- grid-template-columns: 1fr;
- text-align: center;
- }
.footer-title::after {
left: 50%;
@@ -557,83 +545,16 @@ footer {
}
}
-.time-change {
- color: var(--text-color);
- opacity: 0.7;
- font-size: 0.85rem;
- margin-bottom: 0.5rem;
- word-break: break-all;
-}
+@media (max-width: 480px) {
+ .site-grid {
+ grid-template-columns: 1fr;
+ }
-.label {
- color: var(--accent-color);
- font-weight: 500;
-}
-
-.controls-container {
- display: flex;
- justify-content: space-between;
- align-items: center;
- margin-bottom: 20px;
- padding: 15px 20px;
- background: var(--card-background);
- border-radius: 12px;
- border: 1px solid var(--border-color);
-}
-
-.grid-controls {
- display: flex;
- align-items: center;
- gap: 10px;
-}
-
-.grid-controls label {
- color: var(--text-color);
- font-weight: 500;
-}
-
-.grid-controls select {
- padding: 8px 12px;
- border-radius: 8px;
- border: 1px solid var(--border-color);
- background: var(--background-color);
- color: var(--text-color);
- cursor: pointer;
- transition: all 0.3s ease;
-}
-
-.grid-controls select:hover {
- border-color: var(--primary-color);
-}
-
-.sites-stats {
- display: flex;
- gap: 20px;
- align-items: center;
-}
-
-.total-sites, .last-update-global {
- display: flex;
- align-items: center;
- gap: 8px;
- color: var(--text-color);
- font-size: 0.9rem;
-}
-
-.total-sites i, .last-update-global i {
- color: var(--primary-color);
-}
-
-.site-status {
- position: absolute;
- top: 10px;
- right: 10px;
- width: 12px;
- height: 12px;
- border-radius: 50%;
- background: #4CAF50;
-}
-
-.site-status.offline {
- background: #f44336;
+ .site-item {
+ min-height: 220px;
+ }
+
+ .container {
+ padding: 10px;
+ }
}
\ No newline at end of file
diff --git a/.github/.site/js/script.js b/.github/.site/js/script.js
index 5a9f34c..727e297 100644
--- a/.github/.site/js/script.js
+++ b/.github/.site/js/script.js
@@ -1,32 +1,82 @@
document.documentElement.setAttribute('data-theme', 'dark');
-function initGridControls() {
- const gridSize = document.getElementById('grid-size');
- const siteGrid = document.querySelector('.site-grid');
-
- gridSize.addEventListener('change', function() {
- switch(this.value) {
- case 'small':
- siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(200px, 1fr))';
- break;
- case 'medium':
- siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(300px, 1fr))';
- break;
- case 'large':
- siteGrid.style.gridTemplateColumns = 'repeat(auto-fill, minmax(400px, 1fr))';
- break;
- }
- localStorage.setItem('preferredGridSize', this.value);
- });
+let statusIndicator = null;
+let checkingSites = new Map();
+let totalSites = 0;
+let completedSites = 0;
- const savedSize = localStorage.getItem('preferredGridSize');
- if (savedSize) {
- gridSize.value = savedSize;
- gridSize.dispatchEvent(new Event('change'));
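+// Build the floating status panel that reports progress of the per-site checks.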
+function createStatusIndicator() {
+ statusIndicator = document.createElement('div');
+ statusIndicator.className = 'status-indicator';
+    statusIndicator.innerHTML = `
+        <div class="status-header">
+            <div class="status-icon"></div>
+            <span class="status-title">Loading...</span>
+        </div>
+        <div class="status-text">Initializing site checks...</div>
+        <div class="checking-sites"></div>
+        <div class="progress-bar">
+            <div class="progress-fill"></div>
+        </div>
+    `;
+ document.body.appendChild(statusIndicator);
+ return statusIndicator;
+}
+
+function updateStatusIndicator(status, text, progress = 0) {
+ if (!statusIndicator) return;
+
+ const statusIcon = statusIndicator.querySelector('.status-icon');
+ const statusTitle = statusIndicator.querySelector('.status-title');
+ const statusText = statusIndicator.querySelector('.status-text');
+ const progressFill = statusIndicator.querySelector('.progress-fill');
+
+ statusTitle.textContent = status;
+ statusText.textContent = text;
+ progressFill.style.width = `${progress}%`;
+
+ if (status === 'Ready') {
+ statusIcon.classList.add('ready');
+ setTimeout(() => {
+ statusIndicator.classList.add('hidden');
+ setTimeout(() => statusIndicator.remove(), 300);
+ }, 2000);
}
}
-async function checkSiteStatus(url) {
+function addSiteToCheck(siteName, siteUrl) {
+ if (!statusIndicator) return;
+
+ const checkingSitesContainer = statusIndicator.querySelector('.checking-sites');
+ const siteElement = document.createElement('div');
+ siteElement.className = 'checking-site';
+    siteElement.innerHTML = `
+        <span class="site-name">${siteName}</span>
+        <div class="site-status-icon checking"></div>
+    `;
+ checkingSitesContainer.appendChild(siteElement);
+ checkingSites.set(siteName, siteElement);
+}
+
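+// Mark a site's check as finished and advance the overall progress bar.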
+function updateSiteStatus(siteName, isOnline) {
+ const siteElement = checkingSites.get(siteName);
+ if (!siteElement) return;
+
+ const statusIcon = siteElement.querySelector('.site-status-icon');
+ statusIcon.classList.remove('checking');
+ statusIcon.classList.add(isOnline ? 'online' : 'offline');
+ siteElement.classList.add('completed', isOnline ? 'online' : 'offline');
+
+ completedSites++;
+ const progress = (completedSites / totalSites) * 100;
+ updateStatusIndicator(
+ 'Checking Sites...',
+ `Checked ${completedSites}/${totalSites} sites`,
+ progress
+ );
+}
+
+async function checkSiteStatus(url, siteName) {
try {
console.log(`Checking status for: ${url}`);
const controller = new AbortController();
@@ -46,66 +96,75 @@ async function checkSiteStatus(url) {
const isOnline = response.type === 'opaque';
console.log(`Site ${url} is ${isOnline ? 'online' : 'offline'} (Type: ${response.type})`);
+
+ if (siteName) {
+ updateSiteStatus(siteName, isOnline);
+ }
+
return isOnline;
} catch (error) {
console.log(`Error checking ${url}:`, error.message);
+
+ if (siteName) {
+ updateSiteStatus(siteName, false);
+ }
+
return false;
}
}
-const supabaseUrl = 'https://zvfngpoxwrgswnzytadh.supabase.co';
-const supabaseKey = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE';
+const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json';
async function loadSiteData() {
try {
- console.log('Starting to load site data...');
+ console.log('Starting to load site data from GitHub...');
+
+ createStatusIndicator();
+ updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0);
+
const siteList = document.getElementById('site-list');
- siteList.innerHTML = '';
-
- const headers = {
- 'accept': '*/*',
- 'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
- 'apikey': supabaseKey,
- 'authorization': `Bearer ${supabaseKey}`,
- 'content-type': 'application/json',
- 'cache-control': 'no-cache',
- 'pragma': 'no-cache',
- 'range': '0-9'
- };
-
- console.log('Fetching from Supabase with headers:', headers);
- const response = await fetch(`${supabaseUrl}/rest/v1/public?select=*`, {
- method: 'GET',
- headers: headers
- });
+
+ console.log(`Fetching from GitHub: ${domainsJsonUrl}`);
+ const response = await fetch(domainsJsonUrl);
if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
- const data = await response.json();
+ const configSite = await response.json(); // Directly get the site data object
- siteList.innerHTML = ''; if (data && data.length > 0) {
- console.log('Raw data from Supabase:', data);
- const configSite = data[0].data;
- console.log('Parsed config site:', configSite);
- let totalSites = Object.keys(configSite).length;
+ siteList.innerHTML = '';
+
+ if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object
+ totalSites = Object.keys(configSite).length;
+ completedSites = 0;
let latestUpdate = new Date(0);
document.getElementById('sites-count').textContent = totalSites;
+
+ updateStatusIndicator('Checking Sites...', `Starting checks for ${totalSites} sites...`, 0);
+
+ Object.entries(configSite).forEach(([siteName, site]) => {
+ addSiteToCheck(siteName, site.full_url);
+ });
- for (const siteName in configSite) {
- const site = configSite[siteName];
+ const statusChecks = Object.entries(configSite).map(async ([siteName, site]) => {
+ const isOnline = await checkSiteStatus(site.full_url, siteName);
+ return { siteName, site, isOnline };
+ });
+
+ const results = await Promise.all(statusChecks);
+
+ updateStatusIndicator('Ready', 'All sites checked successfully!', 100);
+
+ results.forEach(({ siteName, site, isOnline }) => {
const siteItem = document.createElement('div');
siteItem.className = 'site-item';
siteItem.style.cursor = 'pointer';
- // Add status indicator
const statusDot = document.createElement('div');
statusDot.className = 'site-status';
- const isOnline = await checkSiteStatus(site.full_url);
if (!isOnline) statusDot.classList.add('offline');
siteItem.appendChild(statusDot);
- // Update latest update time
const updateTime = new Date(site.time_change);
if (updateTime > latestUpdate) {
latestUpdate = updateTime;
@@ -133,7 +192,9 @@ async function loadSiteData() {
oldDomain.className = 'old-domain';
oldDomain.innerHTML = ` ${site.old_domain}`;
siteInfo.appendChild(oldDomain);
- } siteItem.addEventListener('click', function() {
+ }
+
+ siteItem.addEventListener('click', function() {
window.open(site.full_url, '_blank', 'noopener,noreferrer');
});
@@ -150,7 +211,7 @@ async function loadSiteData() {
siteItem.appendChild(siteTitle);
siteItem.appendChild(siteInfo);
siteList.appendChild(siteItem);
- }
+ });
const formattedDate = latestUpdate.toLocaleDateString('it-IT', {
year: 'numeric',
@@ -162,6 +223,7 @@ async function loadSiteData() {
document.getElementById('last-update-time').textContent = formattedDate;
} else {
             siteList.innerHTML = 'No sites available';
+ updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
}
} catch (error) {
console.error('Errore:', error);
@@ -171,6 +233,10 @@ async function loadSiteData() {
`;
+ if (statusIndicator) {
+ updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0);
+ statusIndicator.querySelector('.status-icon').style.background = '#f44336';
+ }
}
}
diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml
new file mode 100644
index 0000000..231c795
--- /dev/null
+++ b/.github/workflows/update_domain.yml
@@ -0,0 +1,49 @@
+name: Update domains
+
+on:
+ schedule:
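+    # Run every two hours, on the hour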
+ - cron: "0 */2 * * *"
+ workflow_dispatch:
+
+jobs:
+ update-domains:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: |
+          pip install --upgrade pip setuptools wheel
+          pip install httpx ua-generator requests
+
+ - name: Configure DNS
+ run: |
+ sudo sh -c 'echo "nameserver 9.9.9.9" > /etc/resolv.conf'
+ cat /etc/resolv.conf
+
+ - name: Execute domain update script
+ run: python .github/.domain/domain_update.py
+
+ - name: Commit and push changes (if any)
+ run: |
+ git config --global user.name 'github-actions[bot]'
+ git config --global user.email 'github-actions[bot]@users.noreply.github.com'
+
+ # Check if domains.json was modified
+ if ! git diff --quiet .github/.domain/domains.json; then
+ git add .github/.domain/domains.json
+ git commit -m "Automatic domain update [skip ci]"
+ echo "Changes committed. Attempting to push..."
+ git push
+ else
+ echo "No changes to .github/.domain/domains.json to commit."
+ fi
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 5cf4a3f..9322c75 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,5 +52,4 @@ cmd.txt
bot_config.json
scripts.json
active_requests.json
-domains.json
working_proxies.json
\ No newline at end of file
diff --git a/README.md b/README.md
index 7ff53c4..0780427 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
diff --git a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
index 0dd36a7..d46f635 100644
--- a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
@@ -31,7 +31,8 @@ class ScrapSerie:
self.client = httpx.Client(
cookies={"sessionId": self.session_id},
headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
- base_url=full_url
+ base_url=full_url,
+ verify=False
)
try:
diff --git a/StreamingCommunity/Api/Site/raiplay/__init__.py b/StreamingCommunity/Api/Site/raiplay/__init__.py
index d1b7e23..816d753 100644
--- a/StreamingCommunity/Api/Site/raiplay/__init__.py
+++ b/StreamingCommunity/Api/Site/raiplay/__init__.py
@@ -21,7 +21,7 @@ from .film import download_film
# Variable
indice = 5
_useFor = "Film_&_Serie"
-_priority = 1 # NOTE: Site search need the use of tmbd obj
+_priority = 0
_engineDownload = "hls"
_deprecate = False
diff --git a/StreamingCommunity/Api/Site/raiplay/site.py b/StreamingCommunity/Api/Site/raiplay/site.py
index c4a4b1e..ef95cbc 100644
--- a/StreamingCommunity/Api/Site/raiplay/site.py
+++ b/StreamingCommunity/Api/Site/raiplay/site.py
@@ -1,9 +1,5 @@
# 21.05.24
-import threading
-import queue
-
-
# External libraries
import httpx
from rich.console import Console
@@ -13,12 +9,9 @@ from rich.console import Console
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
from StreamingCommunity.Util.table import TVShowManager
-from StreamingCommunity.Lib.TMBD.tmdb import tmdb
-
-
-# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+from .util.ScrapeSerie import GetSerieInfo
# Variable
@@ -26,76 +19,33 @@ console = Console()
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
-MAX_THREADS = 12
-def determine_media_type(title):
+def determine_media_type(item):
"""
- Use TMDB to determine if a title is a movie or TV show.
+    Determine whether the item is a film or a TV series by checking the actual
+    season count via GetSerieInfo.
"""
try:
- # First search as a movie
- movie_results = tmdb._make_request("search/movie", {"query": title})
- movie_count = len(movie_results.get("results", []))
-
- # Then search as a TV show
- tv_results = tmdb._make_request("search/tv", {"query": title})
- tv_count = len(tv_results.get("results", []))
-
- # If results found in only one category, use that
- if movie_count > 0 and tv_count == 0:
- return "film"
- elif tv_count > 0 and movie_count == 0:
- return "tv"
-
- # If both have results, compare popularity
- if movie_count > 0 and tv_count > 0:
- top_movie = movie_results["results"][0]
- top_tv = tv_results["results"][0]
-
- return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv"
+ # Extract program name from path_id
+ program_name = None
+ if item.get('path_id'):
+ parts = item['path_id'].strip('/').split('/')
+ if len(parts) >= 2:
+ program_name = parts[-1].split('.')[0]
- return "film"
+ if not program_name:
+ return "film"
+
+ scraper = GetSerieInfo(program_name)
+ scraper.collect_info_title()
+ return "tv" if scraper.getNumberSeason() > 0 else "film"
except Exception as e:
- console.log(f"Error determining media type with TMDB: {e}")
+ console.print(f"[red]Error determining media type: {e}[/red]")
return "film"
-def worker_determine_type(work_queue, result_dict, worker_id):
- """
- Worker function to process items from queue and determine media types.
-
- Parameters:
- - work_queue: Queue containing items to process
- - result_dict: Dictionary to store results
- - worker_id: ID of the worker thread
- """
- while not work_queue.empty():
- try:
- index, item = work_queue.get(block=False)
- title = item.get('titolo', '')
- media_type = determine_media_type(title)
-
- result_dict[index] = {
- 'id': item.get('id', ''),
- 'name': title,
- 'type': media_type,
- 'path_id': item.get('path_id', ''),
- 'url': f"https://www.raiplay.it{item.get('url', '')}",
- 'image': f"https://www.raiplay.it{item.get('immagine', '')}",
- }
-
- work_queue.task_done()
-
- except queue.Empty:
- break
-
- except Exception as e:
- console.log(f"Worker {worker_id} error: {e}")
- work_queue.task_done()
-
-
def title_search(query: str) -> int:
"""
Search for titles based on a search query.
@@ -141,33 +91,15 @@ def title_search(query: str) -> int:
data = response.json().get('agg').get('titoli').get('cards')
data = data[:15] if len(data) > 15 else data
- # Use multithreading to determine media types in parallel
- work_queue = queue.Queue()
- result_dict = {}
-
- # Add items to the work queue
- for i, item in enumerate(data):
- work_queue.put((i, item))
-
- # Create and start worker threads
- threads = []
- for i in range(min(MAX_THREADS, len(data))):
- thread = threading.Thread(
- target=worker_determine_type,
- args=(work_queue, result_dict, i),
- daemon=True
- )
- threads.append(thread)
- thread.start()
-
- # Wait for all threads to complete
- for thread in threads:
- thread.join()
-
- # Add all results to media manager in correct order
- for i in range(len(data)):
- if i in result_dict:
- media_search_manager.add_media(result_dict[i])
+ # Process each item and add to media manager
+ for item in data:
+ media_search_manager.add_media({
+ 'id': item.get('id', ''),
+ 'name': item.get('titolo', ''),
+ 'type': determine_media_type(item),
+ 'path_id': item.get('path_id', ''),
+ 'url': f"https://www.raiplay.it{item.get('url', '')}",
+ 'image': f"https://www.raiplay.it{item.get('immagine', '')}",
+ })
- # Return the number of titles found
return media_search_manager.get_length()
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
index d54ec1f..b7bd863 100644
--- a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
@@ -30,28 +30,48 @@ class GetSerieInfo:
try:
program_url = f"{self.base_url}/programmi/{self.program_name}.json"
response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
+
+ # If 404, content is not yet available
+ if response.status_code == 404:
+ logging.info(f"Content not yet available: {self.program_name}")
+ return
+
response.raise_for_status()
-
json_data = response.json()
# Look for seasons in the 'blocks' property
- for block in json_data.get('blocks'):
- if block.get('type') == 'RaiPlay Multimedia Block' and block.get('name', '').lower() == 'episodi':
- self.publishing_block_id = block.get('id')
-
- # Extract seasons from sets array
- for season_set in block.get('sets', []):
- if 'stagione' in season_set.get('name', '').lower():
- self.seasons_manager.add_season({
- 'id': season_set.get('id', ''),
- 'number': len(self.seasons_manager.seasons) + 1,
- 'name': season_set.get('name', ''),
- 'path': season_set.get('path_id', ''),
- 'episodes_count': season_set.get('episode_size', {}).get('number', 0)
- })
+        for block in json_data.get('blocks', []):
+ # Check if block is a season block or episodi block
+ if block.get('type') == 'RaiPlay Multimedia Block':
+ if block.get('name', '').lower() == 'episodi':
+ self.publishing_block_id = block.get('id')
+
+ # Extract seasons from sets array
+ for season_set in block.get('sets', []):
+ if 'stagione' in season_set.get('name', '').lower():
+ self._add_season(season_set, block.get('id'))
+
+ elif 'stagione' in block.get('name', '').lower():
+ self.publishing_block_id = block.get('id')
+
+ # Extract season directly from block's sets
+ for season_set in block.get('sets', []):
+ self._add_season(season_set, block.get('id'))
+
-        except Exception as e:
+        except httpx.HTTPError as e:
logging.error(f"Error collecting series info: {e}")
+ except Exception as e:
+ logging.error(f"Unexpected error collecting series info: {e}")
+
+ def _add_season(self, season_set: dict, block_id: str):
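+        """Append one season entry (derived from a RaiPlay 'set') to the seasons manager."""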
+ self.seasons_manager.add_season({
+ 'id': season_set.get('id', ''),
+ 'number': len(self.seasons_manager.seasons) + 1,
+ 'name': season_set.get('name', ''),
+ 'path': season_set.get('path_id', ''),
+ 'episodes_count': season_set.get('episode_size', {}).get('number', 0)
+ })
def collect_info_season(self, number_season: int) -> None:
"""Get episodes for a specific season."""
diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py
index a2df4df..535de5b 100644
--- a/StreamingCommunity/Upload/version.py
+++ b/StreamingCommunity/Upload/version.py
@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
-__version__ = '3.0.7'
+__version__ = '3.0.8'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
__copyright__ = 'Copyright 2024'
diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py
index bea1edc..62f68a4 100644
--- a/StreamingCommunity/Util/config_json.py
+++ b/StreamingCommunity/Util/config_json.py
@@ -39,9 +39,6 @@ class ConfigManager:
# Get the actual path of the module file
current_file_path = os.path.abspath(__file__)
- # Navigate upwards to find the project root
- # Assuming this file is in a package structure like StreamingCommunity/Util/config_json.py
- # We need to go up 2 levels to reach the project root
base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
# Initialize file paths
@@ -271,33 +268,32 @@ class ConfigManager:
self._load_site_data_from_file()
def _load_site_data_from_api(self) -> None:
- """Load site data from API."""
+ """Load site data from GitHub."""
+ domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
headers = {
- "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
- "Authorization": f"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Inp2Zm5ncG94d3Jnc3duenl0YWRoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDAxNTIxNjMsImV4cCI6MjA1NTcyODE2M30.FNTCCMwi0QaKjOu8gtZsT5yQttUW8QiDDGXmzkn89QE",
- "Content-Type": "application/json",
- "User-Agent": get_userAgent()
+ "User-Agent": get_userAgent()
}
try:
- console.print("[bold cyan]Retrieving site data from API...[/bold cyan]")
- response = requests.get("https://zvfngpoxwrgswnzytadh.supabase.co/rest/v1/public", timeout=8, headers=headers)
+ console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
+ response = requests.get(domains_github_url, timeout=8, headers=headers)
if response.ok:
- data = response.json()
- if data and len(data) > 0:
- self.configSite = data[0]['data']
-
- site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
-
- else:
- console.print("[bold yellow]API returned an empty data set[/bold yellow]")
+ self.configSite = response.json()
+
+ site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+ console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")
+
else:
- console.print(f"[bold red]API request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
+ console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
self._handle_site_data_fallback()
+ except json.JSONDecodeError as e:
+ console.print(f"[bold red]Error parsing JSON from GitHub:[/bold red] {str(e)}")
+ self._handle_site_data_fallback()
+
except Exception as e:
- console.print(f"[bold red]API connection error:[/bold red] {str(e)}")
+ console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
self._handle_site_data_fallback()
def _load_site_data_from_file(self) -> None:
@@ -562,7 +558,6 @@ class ConfigManager:
return section in config_source
-# Helper function to check the platform
def get_use_large_bar():
"""
Determine if the large bar feature should be enabled.
diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py
index 490076c..2d8f7d1 100644
--- a/StreamingCommunity/Util/os.py
+++ b/StreamingCommunity/Util/os.py
@@ -329,8 +329,8 @@ class InternManager():
bool: True if the current DNS server can resolve a domain name,
False if can't resolve or in case of errors
"""
-
test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]
+
try:
for domain in test_domains:
# socket.gethostbyname() works consistently across all platforms
diff --git a/setup.py b/setup.py
index 5a63b87..1fe021a 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", enco
setup(
name="StreamingCommunity",
- version="3.0.7",
+ version="3.0.8",
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Lovi-0",