Update readme

This commit is contained in:
Lovi 2025-01-02 19:00:21 +01:00
parent 441e761953
commit 5d71f81b61
3 changed files with 53 additions and 15 deletions

View File

@@ -400,15 +400,16 @@ The `run-container` command mounts also the `config.json` file, so any change to
| Website | Status |
|:-------------------|:------:|
| 1337xx | ✅ |
| Altadefinizione | ✅ |
| AnimeUnity | ✅ |
| Ilcorsaronero | ✅ |
| CB01New | ✅ |
| DDLStreamItaly | ✅ |
| GuardaSerie | ✅ |
| MostraGuarda | ✅ |
| StreamingCommunity | ✅ |
| [1337xx](https://1337xx.to/) | ✅ |
| [Altadefinizione](https://altadefinizione.prof/) | ✅ |
| [AnimeUnity](https://animeunity.so/) | ✅ |
| [Ilcorsaronero](https://ilcorsaronero.link/) | ✅ |
| [CB01New](https://cb01new.pics/) | ✅ |
| [DDLStreamItaly](https://ddlstreamitaly.co/) | ✅ |
| [GuardaSerie](https://guardaserie.academy/) | ✅ |
| [MostraGuarda](https://mostraguarda.stream/) | ✅ |
| [StreamingCommunity](https://streamingcommunity.prof/) | ✅ |
# Tutorials

View File

@@ -146,11 +146,9 @@ def search_domain(site_name: str, base_url: str, get_first: bool = False):
# Get configuration values
max_timeout = config_manager.get_int("REQUESTS", "timeout")
domain = str(config_manager.get_dict("SITE", site_name)['domain'])
test_url = f"{base_url}.{domain}"
try:
# Test current domain configuration
test_url = f"{base_url}.{domain}"
if validate_url(test_url, max_timeout):
parsed_url = urlparse(test_url)
tld = parsed_url.netloc.split('.')[-1]
@@ -163,7 +161,7 @@ def search_domain(site_name: str, base_url: str, get_first: bool = False):
# Perform Google search if current domain fails
query = base_url.split("/")[-1]
search_results = list(search(query, num_results=10, lang="it"))
search_results = list(search(query, num_results=15, lang="it"))
console.print(f"Google search: {search_results}")
def normalize_for_comparison(url):

View File

@ -20,6 +20,7 @@ from StreamingCommunity.Api.Template.Util import search_domain
# Variable
console = Console()
README_PATH = "README.md"
def load_site_names():
@@ -78,6 +79,39 @@ def load_site_names():
return site_names
def update_readme(site_names):
    """Rewrite the status-table rows of README.md with freshly resolved domains.

    For every markdown table row of the form ``| [SiteName](...) | ... |`` whose
    lowercased display name appears in *site_names*, resolve the current working
    domain via ``search_domain`` and rewrite the row's link. All other lines are
    copied through unchanged.

    Parameters:
        site_names: collection of lowercase site aliases (membership is tested
            with ``in``); presumably the dict returned by ``load_site_names`` —
            TODO confirm, since the ``__main__`` block appears to pass a single
            alias string instead.

    Returns:
        None. Writes README.md in place; prints each rewritten row for logging.
    """
    if not os.path.exists(README_PATH):
        console.print(f"[red]README file not found at {README_PATH}")
        return

    with open(README_PATH, "r", encoding="utf-8") as file:
        lines = file.readlines()

    updated_lines = []
    for line in lines:
        # A row we manage always starts with "| [" (which already implies it
        # contains "|"), so no extra containment check is needed.
        if line.startswith("| ["):
            # Display name is the text between "[" and "]".
            site_name = line.split("[")[1].split("]")[0]
            alias = site_name.lower()
            if alias in site_names:
                domain_to_use, _ = search_domain(site_name=alias, base_url=f"https://{alias}", get_first=True)
                print("Update line: ", line)

                # BUG FIX: the original compared the README display name
                # ("AnimeUnity") against the lowercase literal "animeunity",
                # so the www. branch could never trigger. Compare the
                # lowercased alias instead.
                if alias == "animeunity":
                    updated_line = f"| [{site_name}](https://www.{alias}.{domain_to_use}/) | ✅ |\n"
                else:
                    updated_line = f"| [{site_name}](https://{alias}.{domain_to_use}/) | ✅ |\n"

                print("To: ", updated_line.strip())
                updated_lines.append(updated_line)
                continue
        updated_lines.append(line)

    with open(README_PATH, "w", encoding="utf-8") as file:
        file.writelines(updated_lines)
if __name__ == "__main__":
site_names = load_site_names()
for alias, (site_name, use_for) in site_names.items():
@@ -85,6 +119,11 @@ if __name__ == "__main__":
if site_name == "animeunity":
domain_to_use, _ = search_domain(site_name=site_name, base_url=f"https://www.{site_name}", get_first=True)
else:
domain_to_use, _ = search_domain(site_name=site_name, base_url=f"https://{site_name}", get_first=True)
domain_to_use, _ = search_domain(site_name=site_name, base_url=f"https://{site_name}", get_first=True)
# Update readme
print("\n")
print("Return domain: ", domain_to_use)
update_readme(alias)