Mirror of https://github.com/Arrowar/StreamingCommunity.git
Synced 2025-06-07 12:05:35 +00:00

Remove auto domain update for sc and au.

This commit is contained in:
parent 4791de098d
commit 9c74f17e31
@@ -1,7 +1,5 @@
 # 21.05.24
 
-from .get_domain import grab_au_top_level_domain as extract_domain
-
 from .manage_ep import (
     manage_selection,
     map_episode_title
@@ -1,108 +0,0 @@
-# 02.04.24
-
-import os
-import threading
-import logging
-
-
-# External libraries
-import httpx
-
-
-# Internal utilities
-from Src.Lib.Google import search as google_search
-
-
-
-def check_url_for_content(url: str, content: str) -> bool:
-    """
-    Check if a URL contains specific content.
-
-    Args:
-        - url (str): The URL to check.
-        - content (str): The content to search for in the response.
-
-    Returns:
-        bool: True if the content is found, False otherwise.
-    """
-    try:
-
-        logging.info(f"Test site to extract domain: {url}")
-        response = httpx.get(url, timeout = 1)
-        response.raise_for_status()
-
-        if content in response.text:
-            return True
-
-    except Exception as e:
-        pass
-
-    return False
-
-
-def grab_top_level_domain(base_url: str, target_content: str) -> str:
-    """
-    Get the top-level domain (TLD) from a list of URLs.
-
-    Args:
-        - base_url (str): The base URL to construct complete URLs.
-        - target_content (str): The content to search for in the response.
-
-    Returns:
-        str: The found TLD, if any.
-    """
-    results = []
-    threads = []
-    path_file = os.path.join("Test", "data", "TLD", "tld_list.txt")
-    logging.info(f"Load file: {path_file}")
-
-    def url_checker(url: str):
-        if check_url_for_content(url, target_content):
-            results.append(url.split(".")[-1])
-
-    if not os.path.exists(path_file):
-        raise FileNotFoundError("The file 'tld_list.txt' does not exist.")
-
-    with open(path_file, "r") as file:
-        urls = [f"{base_url}.{x.strip().lower()}" for x in file]
-
-    for url in urls:
-        thread = threading.Thread(target=url_checker, args=(url,))
-        thread.start()
-        threads.append(thread)
-
-    for thread in threads:
-        thread.join()
-
-    if results:
-        return results[-1]
-
-
-def grab_top_level_domain_light(query: str) -> str:
-    """
-    Get the top-level domain (TLD) using a light method via Google search.
-
-    Args:
-        - query (str): The search query for Google search.
-
-    Returns:
-        str: The found TLD, if any.
-    """
-    for result in google_search(query, num=1, stop=1, pause=2):
-        return result.split(".", 2)[-1].replace("/", "")
-
-
-def grab_au_top_level_domain(method: str) -> str:
-    """
-    Get the top-level domain (TLD) for Anime Unity.
-
-    Args:
-        - method (str): The method to use to obtain the TLD ("light" or "strong").
-
-    Returns:
-        str: The found TLD, if any.
-    """
-    if method == "light":
-        return grab_top_level_domain_light("animeunity")
-    elif method == "strong":
-        return grab_top_level_domain("https://www.animeunity", '<meta name="author" content="AnimeUnity Staff">')
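For context, the deleted module above was consumed through the `extract_domain` alias removed in the first hunk. A minimal sketch of how a caller used it; the names `extract_domain`, `config_manager`, and `SITE_NAME` are taken from this diff, the control flow is illustrative:

    # Hypothetical caller, assuming the pre-commit import is still in place:
    # from .get_domain import grab_au_top_level_domain as extract_domain

    new_tld = extract_domain(method="light")       # Google-search based lookup
    if not new_tld:
        new_tld = extract_domain(method="strong")  # probe the TLD list in parallel threads

    if new_tld:
        # Persist the discovered TLD so later runs reuse it
        config_manager.set_key('SITE', SITE_NAME, new_tld)
        config_manager.write_config()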
@@ -17,7 +17,6 @@ from Src.Util._jsonConfig import config_manager
 
 
 # Logic class
-from .Core.Util import extract_domain
 from .Core.Class.SearchType import MediaManager, MediaItem
 
 
@@ -80,24 +79,11 @@ def update_domain():
 
         console.log(f"[cyan]Test site: [red]https://{SITE_NAME}.{DOMAIN_NOW}")
         response = httpx.get(f"https://www.{SITE_NAME}.{DOMAIN_NOW}")
-        response.status_code
+        response.raise_for_status()
 
-    # If the current site is inaccessible, try to obtain a new domain
     except Exception as e:
 
-        # Get new domain
-        console.print("[red]\nExtract new DOMAIN from TLD list.")
-        new_domain = extract_domain(method="light")
-        console.log(f"[cyan]Extract new domain: [red]{new_domain}")
-
-        if new_domain:
-
-            # Update configuration with the new domain
-            config_manager.set_key('SITE', SITE_NAME, new_domain)
-            config_manager.write_config()
-
-        else:
-            logging.error("Failed to find a new animeunity domain")
-            sys.exit(0)
+        console.log("[red]Upload domain")
+        sys.exit(0)
 
 
@@ -226,3 +226,5 @@ class VideoSource:
         final_url = urlunparse(new_url) # Construct the final URL from the modified parts
 
         return final_url
+
+
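The context line above rebuilds the playlist URL from parsed components with `urlunparse`. A small standalone illustration of that round trip; the URL and query string here are made up:

    from urllib.parse import urlparse, urlunparse

    parsed = urlparse("https://vixcloud.example/embed/123?token=abc")
    # _replace returns a new ParseResult with only the query swapped out
    new_url = parsed._replace(query="token=abc&h=1")
    final_url = urlunparse(new_url)  # 'https://vixcloud.example/embed/123?token=abc&h=1'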
@@ -1,7 +1,5 @@
 # 21.05.24
 
-from .get_domain import grab_sc_top_level_domain as extract_domain
-
 from .manage_ep import (
     manage_selection,
     map_episode_title
@@ -1,106 +0,0 @@
-# 02.04.24
-
-import os
-import threading
-import logging
-
-
-# External library
-import httpx
-
-
-# Internal utilities
-from Src.Lib.Google import search as google_search
-
-
-
-def check_url_for_content(url: str, content: str) -> bool:
-    """
-    Check if a URL contains specific content.
-
-    Args:
-        - url (str): The URL to check.
-        - content (str): The content to search for in the response.
-
-    Returns:
-        bool: True if the content is found, False otherwise.
-    """
-    try:
-
-        logging.info(f"Test site to extract domain: {url}")
-        response = httpx.get(url, timeout = 1)
-        response.raise_for_status()
-
-        if content in response.text:
-            return True
-
-    except Exception as e:
-        pass
-
-    return False
-
-
-def grab_top_level_domain(base_url: str, target_content: str) -> str:
-    """
-    Get the top-level domain (TLD) from a list of URLs.
-
-    Args:
-        - base_url (str): The base URL to construct complete URLs.
-        - target_content (str): The content to search for in the response.
-
-    Returns:
-        str: The found TLD, if any.
-    """
-    results = []
-    threads = []
-    path_file = os.path.join("Test", "data", "TLD", "tld_list.txt")
-    logging.info(f"Load file: {path_file}")
-
-    def url_checker(url: str):
-        if check_url_for_content(url, target_content):
-            results.append(url.split(".")[-1])
-
-    if not os.path.exists(path_file):
-        raise FileNotFoundError("The file 'tld_list.txt' does not exist.")
-
-    with open(path_file, "r") as file:
-        urls = [f"{base_url}.{x.strip().lower()}" for x in file]
-
-    for url in urls:
-        thread = threading.Thread(target=url_checker, args=(url,))
-        thread.start()
-        threads.append(thread)
-
-    for thread in threads:
-        thread.join()
-
-    if results:
-        return results[-1]
-
-
-def grab_top_level_domain_light(query: str) -> str:
-    """
-    Get the top-level domain (TLD) using a light method via Google search.
-
-    Args:
-        - query (str): The search query for Google search.
-
-    Returns:
-        str: The found TLD, if any.
-    """
-    for result in google_search(query, num=1, stop=1, pause=2):
-        return result.split(".", 2)[-1].replace("/", "")
-
-
-def grab_sc_top_level_domain(method: str) -> str:
-    """
-    Get the top-level domain (TLD) for the streaming community.
-    Args:
-        method (str): The method to use to obtain the TLD ("light" or "strong").
-    Returns:
-        str: The found TLD, if any.
-    """
-    if method == "light":
-        return grab_top_level_domain_light("streaming community")
-    elif method == "strong":
-        return grab_top_level_domain("https://streamingcommunity", '<meta name="author" content="StreamingCommunity">')
@@ -21,7 +21,6 @@ from Src.Util.table import TVShowManager
 
 
 # Logic class
-from .Core.Util import extract_domain
 from .Core.Class.SearchType import MediaManager, MediaItem
 
 
@@ -102,6 +101,8 @@ def get_version_and_domain(new_domain = None) -> Tuple[str, str]:
         # Make requests to site to get text
         console.print(f"[cyan]Test site[white]: [red]https://{SITE_NAME}.{config_domain}")
         response = httpx.get(f"https://{SITE_NAME}.{config_domain}")
+        response.raise_for_status()
+
         console.print(f"[cyan]Test respost site[white]: [red]{response.status_code} \n")
 
         # Extract version from the response
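The added `response.raise_for_status()` turns a 4xx/5xx reply into an exception, so the bare `except:` in the next hunk now fires on an unreachable domain instead of the code continuing with a bad response. A standalone sketch of that behaviour with httpx; the URL is illustrative:

    import httpx

    try:
        response = httpx.get("https://example.com/missing-page")
        response.raise_for_status()  # raises httpx.HTTPStatusError for 4xx/5xx responses
    except httpx.HTTPStatusError as err:
        print(f"Site not reachable: {err.response.status_code}")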
@@ -111,16 +112,8 @@ def get_version_and_domain(new_domain = None) -> Tuple[str, str]:
 
     except:
 
-        console.print("[red]\nExtract new DOMAIN from TLD list.")
-        new_domain = extract_domain(method="light")
-        console.log(f"[cyan]Extract new domain: [red]{new_domain}")
-
-        # Update the domain in the configuration file
-        config_manager.set_key('SITE', SITE_NAME, str(new_domain))
-        config_manager.write_config()
-
-        # Retry to get the version and domain
-        return get_version_and_domain(new_domain)
+        console.log("[red]Upload domain.")
+        sys.exit(0)
 
 
 def title_search(title_search: str, domain: str) -> int:
@@ -52,7 +52,7 @@
         "force_resolution": -1
     },
     "SITE": {
-        "streamingcommunity": "foo",
+        "streamingcommunity": "boston",
        "animeunity": "to",
        "altadefinizione": "vodka",
        "guardaserie": "ceo",
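The `SITE` section stores only the TLD for each site; the scrapers shown earlier compose the full host from it (see the f-strings `https://{SITE_NAME}.{config_domain}` above). A minimal sketch of that lookup; the file name and the standalone reading code are assumptions, only the key names come from this diff:

    import json

    # Hypothetical: load the project's JSON config and rebuild the probe URL
    with open("config.json", "r") as f:
        config = json.load(f)

    SITE_NAME = "streamingcommunity"
    domain = config["SITE"][SITE_NAME]     # e.g. "boston" after this commit
    url = f"https://{SITE_NAME}.{domain}"  # -> https://streamingcommunity.boston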