Mirror of https://github.com/Arrowar/StreamingCommunity.git (synced 2025-06-06 19:45:24 +00:00)

Commit 20eae18cff: Add ffmpeg control
Parent: 16bd1c28a1

This commit adds an optional FFmpeg validation pass after each join step (gated by a new check_output_conversion config flag) and replaces the project's custom Src.Lib.Unidecode helper with the external unidecode package. The reconstructed unified diff follows.

@@ -15,8 +15,6 @@ from Src.Lib.Request import requests
 from Src.Util.headers import get_headers
 from Src.Util.console import console
 from Src.Util._jsonConfig import config_manager
-from Src.Lib.Unidecode import transliterate
-

 # Logic class
 from .Core.Class.SearchType import MediaManager, MediaItem
@@ -46,7 +44,7 @@ def title_search(title_search: str) -> int:
     """

     # Send request to search for titles
-    response = requests.get(f"https://{AD_SITE_NAME}.{AD_DOMAIN_NOW}/page/1/?story={transliterate(title_search).replace(' ', '+')}&do=search&subaction=search&titleonly=3")
+    response = requests.get(f"https://{AD_SITE_NAME}.{AD_DOMAIN_NOW}/page/1/?story={title_search.replace(' ', '+')}&do=search&subaction=search&titleonly=3")

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")
@@ -7,7 +7,6 @@ from typing import List

 # Internal utilities
 from Src.Util._jsonConfig import config_manager
-from Src.Lib.Unidecode import transliterate


 # Logic class
@@ -70,7 +69,6 @@ def map_episode_title(tv_name: str, episode: Episode, number_season: int):

     # Additional fix
     map_episode_temp = map_episode_temp.replace(".", "_")
-    map_episode_temp = transliterate(map_episode_temp)

     logging.info(f"Map episode string return: {map_episode_temp}")
     return map_episode_temp
@@ -17,7 +17,6 @@ from Src.Lib.Request import requests
 from Src.Util.headers import get_headers
 from Src.Util.console import console
 from Src.Util._jsonConfig import config_manager
-from Src.Lib.Unidecode import transliterate


 # Logic class
@@ -137,7 +136,7 @@ def title_search(title_search: str, domain: str) -> int:
     """

     # Send request to search for titles ( replace à to a and space to "+" )
-    response = requests.get(f"https://{SC_SITE_NAME}.{domain}/api/search?q={transliterate(title_search).replace(' ', '+')}", headers={'user-agent': get_headers()})
+    response = requests.get(f"https://{SC_SITE_NAME}.{domain}/api/search?q={title_search.replace(' ', '+')}", headers={'user-agent': get_headers()})

     # Add found titles to media search manager
     for dict_title in response.json()['data']:
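
Note: both search helpers above now pass the raw title straight into the query string instead of running it through the project's custom transliterate. A minimal sketch, not part of the commit, of how the same normalization could still be done with the external unidecode package that this commit adopts elsewhere; build_query is a hypothetical helper name:

    # Hypothetical helper, shown only to illustrate the normalization step
    # that the removed Src.Lib.Unidecode.transliterate used to provide.
    from unidecode import unidecode

    def build_query(title: str) -> str:
        # Strip accents first, then encode spaces for the query string.
        return unidecode(title).replace(' ', '+')

    print(build_query("Il caffè è"))  # Il+caffe+e
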
@@ -2,6 +2,7 @@

 import os
 import sys
+import time
 import logging
 import shutil
 import threading
@@ -18,7 +19,7 @@ except: pass
 # Internal utilities
 from Src.Util._jsonConfig import config_manager
 from Src.Util.os import check_file_existence
-from .util import has_audio_stream, need_to_force_to_ts
+from .util import has_audio_stream, need_to_force_to_ts, check_ffmpeg_input
 from .capture import capture_ffmpeg_real_time


@@ -28,6 +29,7 @@ DEBUG_FFMPEG = "debug" if DEBUG_MODE else "error"
 USE_CODECS = config_manager.get_bool("M3U8_FILTER", "use_codec")
 USE_GPU = config_manager.get_bool("M3U8_FILTER", "use_gpu")
 FFMPEG_DEFAULT_PRESET = config_manager.get("M3U8_FILTER", "default_preset")
+CHECK_OUTPUT_CONVERSION = config_manager.get_bool("M3U8_FILTER", "check_output_conversion")



@@ -306,13 +308,19 @@ def join_video(video_path: str, out_path: str, vcodec: str = None, acodec: str =
     ffmpeg_cmd += [out_path, "-y"]
     logging.info(f"FFmpeg command: {ffmpeg_cmd}")

-    # --> Run
+    # Run join
     if DEBUG_MODE:
         subprocess.run(ffmpeg_cmd, check=True)
     else:
         capture_ffmpeg_real_time(ffmpeg_cmd, "[cyan]Join video")
         print()

+    # Check file
+    if CHECK_OUTPUT_CONVERSION:
+        time.sleep(0.5)
+        check_ffmpeg_input(out_path)
+
+
 def join_audios(video_path: str, audio_tracks: List[Dict[str, str]], out_path: str, vcodec: str = 'copy', acodec: str = 'aac', bitrate: str = '192k'):
     """
     Joins audio tracks with a video file using FFmpeg.
@@ -351,14 +359,21 @@ def join_audios(video_path: str, audio_tracks: List[Dict[str, str]], out_path: str, vcodec: str = 'copy', acodec: str = 'aac', bitrate: str = '192k'):
     ffmpeg_cmd += [out_path, "-y"]
     logging.info(f"FFmpeg command: {ffmpeg_cmd}")

-    # --> Run
+    # Run join
     if DEBUG_MODE:
         subprocess.run(ffmpeg_cmd, check=True)
     else:
         capture_ffmpeg_real_time(ffmpeg_cmd, "[cyan]Join audio")
         print()

-def join_subtitle(video_path: str, subtitles_list: List[Dict[str, str]], output_file: str):
+    # Check file
+    if CHECK_OUTPUT_CONVERSION:
+        time.sleep(0.5)
+        check_ffmpeg_input(out_path)
+
+
+
+def join_subtitle(video_path: str, subtitles_list: List[Dict[str, str]], out_path: str):
     """
     Joins subtitles with a video file using FFmpeg.

@@ -366,7 +381,7 @@ def join_subtitle(video_path: str, subtitles_list: List[Dict[str, str]], output_file: str):
         - video (str): The path to the video file.
         - subtitles_list (list[dict[str, str]]): A list of dictionaries containing information about subtitles.
             Each dictionary should contain the 'path' key with the path to the subtitle file and the 'name' key with the name of the subtitle.
-        - output_file (str): The path to save the output file.
+        - out_path (str): The path to save the output file.
     """

     if not check_file_existence(video_path):
@@ -399,12 +414,17 @@ def join_subtitle(video_path: str, subtitles_list: List[Dict[str, str]], output_file: str):
     ffmpeg_cmd.extend(['-c', 'copy', '-c:s', 'mov_text'])

     # Overwrite
-    ffmpeg_cmd += [output_file, "-y"]
+    ffmpeg_cmd += [out_path, "-y"]
     logging.info(f"FFmpeg command: {ffmpeg_cmd}")

-    # --> Run
+    # Run join
     if DEBUG_MODE:
         subprocess.run(ffmpeg_cmd, check=True)
     else:
         capture_ffmpeg_real_time(ffmpeg_cmd, "[cyan]Join subtitle")
         print()
+
+    # Check file
+    if CHECK_OUTPUT_CONVERSION:
+        time.sleep(0.5)
+        check_ffmpeg_input(out_path)
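
Note: all three join helpers (video, audio, subtitle) now share the same post-run gate: when check_output_conversion is enabled, wait briefly and then validate the produced file. A condensed sketch of that pattern, assuming a boolean flag and a validator callable like the ones this commit adds (the wrapper function itself is hypothetical):

    import time

    def run_post_join_check(out_path: str, enabled: bool, validator) -> None:
        # Only validate when the config flag is on.
        if enabled:
            time.sleep(0.5)          # give the filesystem a moment to settle
            validator(out_path)      # e.g. check_ffmpeg_input from .util
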
@@ -141,6 +141,7 @@ def get_ffprobe_info(file_path):
         logging.error(f"Failed to parse JSON output from ffprobe for file {file_path}: {e}")
         return None

+
 def is_png_format_or_codec(file_info):
     """
     Check if the format is 'png_pipe' or if any codec is 'png'.
@@ -155,6 +156,7 @@ def is_png_format_or_codec(file_info):
         return False
     return file_info['format_name'] == 'png_pipe' or 'png' in file_info['codec_names']

+
 def need_to_force_to_ts(file_path):
     """
     Get if a file to TS format if it is in PNG format or contains a PNG codec.
@@ -168,3 +170,43 @@ def need_to_force_to_ts(file_path):
     if is_png_format_or_codec(file_info):
         return True
     return False
+
+
+def check_ffmpeg_input(input_file):
+    """
+    Check if an input file can be processed by FFmpeg.
+
+    Args:
+        input_file (str): Path to the input file.
+
+    Returns:
+        bool: True if the input file is valid and can be processed by FFmpeg, False otherwise.
+    """
+    command = [
+        'ffmpeg', '-v', 'error', '-i', input_file, '-f', 'null', '-'
+    ]
+    logging.info(f"FFmpeg command check: {command}")
+
+    try:
+        # Run the FFmpeg command and capture output
+        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # Check the exit status
+        if result.returncode != 0:
+            logging.error("FFmpeg encountered an error with the input file:")
+            logging.error(result.stderr.decode('utf-8'))
+            return False
+
+        # Optionally, you can analyze the output to check for specific errors
+        stderr_output = result.stderr.decode('utf-8')
+        if 'error' in stderr_output.lower():
+            logging.error("FFmpeg reported an error in the input file:")
+            logging.error(stderr_output)
+            return False
+
+        logging.info(f"Input file is valid: {input_file}")
+        return True
+
+    except Exception as e:
+        logging.error(f"An unexpected error occurred: {e}")
+        return False
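
Note: the new check_ffmpeg_input relies on a standard FFmpeg idiom: decode the input with -v error and send it to the null muxer (-f null -), so nothing is written to disk and any decode problem shows up on stderr or in the exit code. A standalone sketch of the same idea (the file name is hypothetical):

    import subprocess

    def quick_ffmpeg_check(path: str) -> bool:
        # Decode-only pass: errors go to stderr, no output file is produced.
        result = subprocess.run(
            ["ffmpeg", "-v", "error", "-i", path, "-f", "null", "-"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return result.returncode == 0 and not result.stderr

    print(quick_ffmpeg_check("output.mp4"))  # hypothetical file name
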
@@ -22,7 +22,6 @@ from Src.Util.os import (
     reduce_base_name,
     remove_special_characters
 )
-from Src.Lib.Unidecode import transliterate
 from Src.Util.file_validator import can_create_file


@@ -42,6 +41,10 @@ from .segments import M3U8_Segments
 from ..E_Table import report_table


+# External library
+from unidecode import unidecode as transliterate
+
+
 # Config
 DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_FILTER', 'specific_list_audio')
 DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_FILTER', 'specific_list_subtitles')
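
Note: aliasing the external package as transliterate keeps existing call sites unchanged while the custom Src.Lib.Unidecode implementation is deleted further down in this commit. The expected behaviour can be seen with the sample string from the test file removed below:

    from unidecode import unidecode as transliterate

    text = "Il caffè è un'esperienza, così come il gelato."
    print(transliterate(text))
    # Il caffe e un'esperienza, cosi come il gelato.
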
@@ -449,6 +452,12 @@ class Downloader():
            - out_path (str): Path of the output file.
        """

+        # Check if file to rename exist
+        logging.info(f"Check if end file converted exist: {out_path}")
+        if not os.path.exists(out_path):
+            logging.info("Video file converted not exist.")
+            sys.exit(0)
+
        # Rename the output file to the desired output filename if not exist
        if not os.path.exists(self.output_filename):
            os.rename(out_path, self.output_filename)
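
Note: the rename step now refuses to continue when FFmpeg never produced the converted file. A hedged standalone sketch of the guard (the wrapper function is hypothetical; the real code lives in the Downloader class):

    import logging
    import os
    import sys

    def rename_if_converted(out_path: str, output_filename: str) -> None:
        logging.info(f"Check if end file converted exist: {out_path}")
        if not os.path.exists(out_path):
            logging.info("Converted video file does not exist.")
            sys.exit(0)

        # Only rename if the target name is not already taken.
        if not os.path.exists(output_filename):
            os.rename(out_path, output_filename)
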
@@ -1,5 +1,7 @@
 # 20.02.24

+import logging
+
 from collections import deque


@@ -34,10 +36,14 @@ class M3U8_Ts_Estimator:
         Add a file size to the list of file sizes.

         Args:
-            - size (float): The size of the ts file to be added.
+            - size (int): The size of the ts file to be added.
             - size_download (int): Single size of the ts file.
             - duration (float): Time to download segment file.
         """
+        if size <= 0 or size_download <= 0 or duration <= 0:
+            logging.error("Invalid input values: size=%d, size_download=%d, duration=%f", size, size_download, duration)
+            return
+
         self.ts_file_sizes.append(size)
         self.now_downloaded_size += size_download

@@ -45,8 +51,14 @@ class M3U8_Ts_Estimator:
         if len(self.smoothed_speeds) <= 3:
             size_download = size_download / self.tqdm_workers

-        # Calculate mbps
-        speed_mbps = (size_download * 8) / (duration * 1_000_000) * self.tqdm_workers
+        try:
+            # Calculate mbps
+            speed_mbps = (size_download * 8) / (duration * 1_000_000) * self.tqdm_workers
+
+        except ZeroDivisionError as e:
+            logging.error("Division by zero error while calculating speed: %s", e)
+            return
+
         self.list_speeds.append(speed_mbps)

         # Calculate moving average
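
Note: for reference, the formula wrapped in the new try/except converts bytes per second into megabits per second and scales by the worker count. A small worked example with illustrative numbers:

    # Illustrative values only: one 2 MB segment downloaded in 0.8 s by 4 workers.
    size_download = 2_000_000      # bytes
    duration = 0.8                 # seconds
    tqdm_workers = 4

    speed_mbps = (size_download * 8) / (duration * 1_000_000) * tqdm_workers
    print(speed_mbps)  # 80.0
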
@@ -62,17 +74,25 @@ class M3U8_Ts_Estimator:
         Calculate the total size of the files.

         Returns:
-            float: The mean size of the files in a human-readable format.
+            str: The mean size of the files in a human-readable format.
         """
+        try:
+            if len(self.ts_file_sizes) == 0:
+                raise ValueError("No file sizes available to calculate total size.")

-        if len(self.ts_file_sizes) == 0:
-            return 0
+            total_size = sum(self.ts_file_sizes)
+            mean_size = total_size / len(self.ts_file_sizes)

-        total_size = sum(self.ts_file_sizes)
-        mean_size = total_size / len(self.ts_file_sizes)
+            # Return formatted mean size
+            return format_size(mean_size)

-        # Return format mean
-        return format_size(mean_size)
+        except ZeroDivisionError as e:
+            logging.error("Division by zero error occurred: %s", e)
+            return "0B"
+
+        except Exception as e:
+            logging.error("An unexpected error occurred: %s", e)
+            return "Error"

     def get_average_speed(self) -> float:
         """
@@ -1,37 +0,0 @@
-# 16.05.24
-
-from unidecode import unidecode
-
-
-# Internal utilities
-from .alphabet import alpha_mappings
-from .symbols import symbols_mapping
-from .math_symbol import math_symbols_mapping
-from .misc_symbols import misc_symbols_mapping
-from .quantifiers import quantifiers_mapping
-from .geometry import geometry_mapping
-from .additional_math import additional_math_mapping
-from .currency import currency_mapping
-from .units_of_measurement import units_of_measurement_mapping
-from .other import miscellaneous_symbols_mapping
-
-
-all_mappings = {
-    **alpha_mappings,
-    **symbols_mapping,
-    **math_symbols_mapping,
-    **misc_symbols_mapping,
-    **quantifiers_mapping,
-    **geometry_mapping,
-    **additional_math_mapping,
-    **currency_mapping,
-    **units_of_measurement_mapping,
-    **miscellaneous_symbols_mapping
-}
-
-
-
-def transliterate(text):
-    translated_text = ''.join(all_mappings.get(c, c) for c in text)
-    transliterated_text = unidecode(translated_text)
-    return transliterated_text
@@ -1,10 +0,0 @@
-# 16.05.24
-
-additional_math_mapping = {
-    '∛': 'cube root',
-    '∜': 'fourth root',
-    '⊛': 'circled times',
-    '⊚': 'circled dot',
-    '⊡': 'circled times',
-    '⊙': 'circled dot'
-}
@@ -1,141 +0,0 @@
-# 16.05.24
-
-alpha_mappings = {
-
-    # Latin Alphabet
-    'À': 'A', 'Á': 'A', 'Â': 'A', 'Ã': 'A', 'Ä': 'A', 'Å': 'A', 'Æ': 'AE',
-    'Ç': 'C', 'È': 'E', 'É': 'E', 'Ê': 'E', 'Ë': 'E',
-    'Ì': 'I', 'Í': 'I', 'Î': 'I', 'Ï': 'I',
-    'Ð': 'D', 'Ñ': 'N',
-    'Ò': 'O', 'Ó': 'O', 'Ô': 'O', 'Õ': 'O', 'Ö': 'O', 'Ø': 'O',
-    'Ù': 'U', 'Ú': 'U', 'Û': 'U', 'Ü': 'U',
-    'Ý': 'Y',
-    'Þ': 'TH',
-    'ß': 'ss',
-    'à': 'a', 'á': 'a', 'â': 'a', 'ã': 'a', 'ä': 'a', 'å': 'a', 'æ': 'ae',
-    'ç': 'c', 'è': 'e', 'é': 'e', 'ê': 'e', 'ë': 'e',
-    'ì': 'i', 'í': 'i', 'î': 'i', 'ï': 'i',
-    'ð': 'd', 'ñ': 'n',
-    'ò': 'o', 'ó': 'o', 'ô': 'o', 'õ': 'o', 'ö': 'o', 'ø': 'o',
-    'ù': 'u', 'ú': 'u', 'û': 'u', 'ü': 'u',
-    'ý': 'y', 'þ': 'th', 'ÿ': 'y',
-    'Ā': 'A', 'ā': 'a', 'Ă': 'A', 'ă': 'a', 'Ą': 'A', 'ą': 'a',
-    'Ć': 'C', 'ć': 'c', 'Ĉ': 'C', 'ĉ': 'c', 'Ċ': 'C', 'ċ': 'c', 'Č': 'C', 'č': 'c',
-    'Ď': 'D', 'ď': 'd', 'Đ': 'D', 'đ': 'd',
-    'Ē': 'E', 'ē': 'e', 'Ĕ': 'E', 'ĕ': 'e', 'Ė': 'E', 'ė': 'e', 'Ę': 'E', 'ę': 'e', 'Ě': 'E', 'ě': 'e',
-    'Ĝ': 'G', 'ĝ': 'g', 'Ğ': 'G', 'ğ': 'g', 'Ġ': 'G', 'ġ': 'g', 'Ģ': 'G', 'ģ': 'g',
-    'Ĥ': 'H', 'ĥ': 'h', 'Ħ': 'H', 'ħ': 'h',
-    'Ĩ': 'I', 'ĩ': 'i', 'Ī': 'I', 'ī': 'i', 'Ĭ': 'I', 'ĭ': 'i', 'Į': 'I', 'į': 'i',
-    'İ': 'I', 'ı': 'i',
-    'IJ': 'IJ', 'ij': 'ij',
-    'Ĵ': 'J', 'ĵ': 'j',
-    'Ķ': 'K', 'ķ': 'k', 'ĸ': 'k',
-    'Ĺ': 'L', 'ĺ': 'l', 'Ļ': 'L', 'ļ': 'l', 'Ľ': 'L', 'ľ': 'l', 'Ŀ': 'L', 'ŀ': 'l', 'Ł': 'L', 'ł': 'l',
-    'Ń': 'N', 'ń': 'n', 'Ņ': 'N', 'ņ': 'n', 'Ň': 'N', 'ň': 'n', 'ʼn': 'n', 'Ŋ': 'N', 'ŋ': 'n',
-    'Ō': 'O', 'ō': 'o', 'Ŏ': 'O', 'ŏ': 'o', 'Ő': 'O', 'ő': 'o',
-    'Œ': 'OE', 'œ': 'oe',
-    'Ŕ': 'R', 'ŕ': 'r', 'Ŗ': 'R', 'ŗ': 'r', 'Ř': 'R', 'ř': 'r',
-    'Ś': 'S', 'ś': 's', 'Ŝ': 'S', 'ŝ': 's', 'Ş': 'S', 'ş': 's', 'Š': 'S', 'š': 's',
-    'Ţ': 'T', 'ţ': 't', 'Ť': 'T', 'ť': 't', 'Ŧ': 'T', 'ŧ': 't',
-    'Ũ': 'U', 'ũ': 'u', 'Ū': 'U', 'ū': 'u', 'Ŭ': 'U', 'ŭ': 'u', 'Ů': 'U', 'ů': 'u',
-    'Ű': 'U', 'ű': 'u', 'Ų': 'U', 'ų': 'u',
-    'Ŵ': 'W', 'ŵ': 'w',
-    'Ŷ': 'Y', 'ŷ': 'y', 'Ÿ': 'Y',
-    'Ź': 'Z', 'ź': 'z', 'Ż': 'Z', 'ż': 'z', 'Ž': 'Z', 'ž': 'z',
-    'ƒ': 'f',
-    'Ơ': 'O', 'ơ': 'o', 'Ư': 'U', 'ư': 'u',
-    'Ǎ': 'A', 'ǎ': 'a', 'Ǐ': 'I', 'ǐ': 'i', 'Ǒ': 'O', 'ǒ': 'o', 'Ǔ': 'U', 'ǔ': 'u',
-    'Ǖ': 'U', 'ǖ': 'u', 'Ǘ': 'U', 'ǘ': 'u', 'Ǚ': 'U', 'ǚ': 'u', 'Ǜ': 'U', 'ǜ': 'u',
-    'Ǻ': 'A', 'ǻ': 'a', 'Ǽ': 'AE', 'ǽ': 'ae', 'Ǿ': 'O', 'ǿ': 'o',
-
-    # Cyrillic Alphabet
-    'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ё': 'E', 'Ж': 'Zh', 'З': 'Z', 'И': 'I',
-    'Й': 'Y', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T',
-    'У': 'U', 'Ф': 'F', 'Х': 'Kh', 'Ц': 'Ts', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Shch', 'Ъ': '', 'Ы': 'Y',
-    'Ь': '', 'Э': 'E', 'Ю': 'Yu', 'Я': 'Ya', 'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e',
-    'ё': 'e', 'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'y', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o',
-    'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'kh', 'ц': 'ts', 'ч': 'ch', 'ш': 'sh',
-    'щ': 'shch', 'ъ': '', 'ы': 'y', 'ь': '', 'э': 'e', 'ю': 'yu', 'я': 'ya',
-
-    # Greek Alphabet
-    'Α': 'A', 'Β': 'B', 'Γ': 'G', 'Δ': 'D', 'Ε': 'E', 'Ζ': 'Z', 'Η': 'E', 'Θ': 'Th', 'Ι': 'I', 'Κ': 'K',
-    'Λ': 'L', 'Μ': 'M', 'Ν': 'N', 'Ξ': 'X', 'Ο': 'O', 'Π': 'P', 'Ρ': 'R', 'Σ': 'S', 'Τ': 'T', 'Υ': 'Y',
-    'Φ': 'Ph', 'Χ': 'Ch', 'Ψ': 'Ps', 'Ω': 'O', 'α': 'a', 'β': 'b', 'γ': 'g', 'δ': 'd', 'ε': 'e', 'ζ': 'z',
-    'η': 'e', 'θ': 'th', 'ι': 'i', 'κ': 'k', 'λ': 'l', 'μ': 'm', 'ν': 'n', 'ξ': 'x', 'ο': 'o', 'π': 'p',
-    'ρ': 'r', 'σ': 's', 'τ': 't', 'υ': 'y', 'φ': 'ph', 'χ': 'ch', 'ψ': 'ps', 'ω': 'o',
-
-    # Arabic Alphabet
-    'ا': 'a', 'ب': 'b', 'ت': 't', 'ث': 'th', 'ج': 'j', 'ح': 'h', 'خ': 'kh', 'د': 'd', 'ذ': 'dh', 'ر': 'r',
-    'ز': 'z', 'س': 's', 'ش': 'sh', 'ص': 's', 'ض': 'd', 'ط': 't', 'ظ': 'z', 'ع': 'a', 'غ': 'gh', 'ف': 'f',
-    'ق': 'q', 'ك': 'k', 'ل': 'l', 'م': 'm', 'ن': 'n', 'ه': 'h', 'و': 'w', 'ي': 'y', 'ء': "'", 'آ': 'a',
-    'أ': 'a', 'ؤ': 'w', 'إ': 'i', 'ئ': 'y', 'ى': 'a', 'ة': 't',
-
-    # Hebrew Alphabet
-    'א': 'a', 'ב': 'b', 'ג': 'g', 'ד': 'd', 'ה': 'h', 'ו': 'v', 'ז': 'z', 'ח': 'h', 'ט': 't', 'י': 'y',
-    'כ': 'k', 'ל': 'l', 'מ': 'm', 'נ': 'n', 'ס': 's', 'ע': 'a', 'פ': 'p', 'צ': 'ts', 'ק': 'k', 'ר': 'r',
-    'ש': 'sh', 'ת': 't', 'ך': 'k', 'ם': 'm', 'ן': 'n', 'ף': 'p', 'ץ': 'ts',
-
-    # Japanese (Kana)
-    'あ': 'a', 'い': 'i', 'う': 'u', 'え': 'e', 'お': 'o', 'か': 'ka', 'き': 'ki', 'く': 'ku', 'け': 'ke', 'こ': 'ko',
-    'ア': 'a', 'イ': 'i', 'ウ': 'u', 'エ': 'e', 'オ': 'o', 'カ': 'ka', 'キ': 'ki', 'ク': 'ku', 'ケ': 'ke', 'コ': 'ko',
-
-    # Korean (Hangul)
-    '가': 'ga', '나': 'na', '다': 'da', '라': 'ra', '마': 'ma', '바': 'ba', '사': 'sa', '아': 'a', '자': 'ja', '차': 'cha',
-    '카': 'ka', '타': 'ta', '파': 'pa', '하': 'ha', '각': 'gak', '낙': 'nak', '닥': 'dak', '락': 'rak', '막': 'mak', '박': 'bak',
-    '삭': 'sak', '악': 'ak', '작': 'jak', '착': 'chak', '캄': 'kam', '탐': 'tam', '팜': 'pam', '함': 'ham',
-
-    # Chinese (Pinyin)
-    '阿': 'a', '爸': 'ba', '从': 'cong', '的': 'de', '额': 'e', '发': 'fa', '个': 'ge', '和': 'he', '你': 'ni', '我': 'wo',
-    '是': 'shi', '不': 'bu', '了': 'le', '有': 'you', '他': 'ta', '这': 'zhe', '那': 'na', '她': 'ta', '很': 'hen', '吗': 'ma',
-
-    # Devanagari (Hindi)
-    'अ': 'a', 'आ': 'aa', 'इ': 'i', 'ई': 'ii', 'उ': 'u', 'ऊ': 'uu', 'ए': 'e', 'ऐ': 'ai', 'ओ': 'o', 'औ': 'au',
-    'क': 'ka', 'ख': 'kha', 'ग': 'ga', 'घ': 'gha', 'च': 'cha', 'छ': 'chha', 'ज': 'ja', 'झ': 'jha', 'ट': 'ta', 'ठ': 'tha',
-    'ड': 'da', 'ढ': 'dha', 'ण': 'na', 'त': 'ta', 'थ': 'tha', 'द': 'da', 'ध': 'dha', 'न': 'na', 'प': 'pa', 'फ': 'fa',
-    'ब': 'ba', 'भ': 'bha', 'म': 'ma', 'य': 'ya', 'र': 'ra', 'ल': 'la', 'व': 'va', 'श': 'sha', 'ष': 'sha', 'स': 'sa',
-    'ह': 'ha',
-
-    # Thai Alphabet
-    'ก': 'k', 'ข': 'kh', 'ฃ': 'kh', 'ค': 'kh', 'ฅ': 'kh', 'ฆ': 'kh', 'ง': 'ng', 'จ': 'ch', 'ฉ': 'ch', 'ช': 'ch',
-    'ซ': 's', 'ฌ': 'ch', 'ญ': 'y', 'ฎ': 'd', 'ฏ': 't', 'ฐ': 'th', 'ฑ': 'th', 'ฒ': 'th', 'ณ': 'n', 'ด': 'd',
-    'ต': 't', 'ถ': 'th', 'ท': 'th', 'ธ': 'th', 'น': 'n', 'บ': 'b', 'ป': 'p', 'ผ': 'ph', 'ฝ': 'f', 'พ': 'ph',
-    'ฟ': 'f', 'ภ': 'ph', 'ม': 'm', 'ย': 'y', 'ร': 'r', 'ฤ': 'rue', 'ล': 'l', 'ฦ': 'lue', 'ว': 'w', 'ศ': 's',
-    'ษ': 's', 'ส': 's', 'ห': 'h', 'ฬ': 'l', 'อ': 'o', 'ฮ': 'h',
-
-    # Vietnamit Alphabet
-    'À': 'A', 'Á': 'A', 'Â': 'A', 'Ã': 'A', 'Ä': 'A', 'Å': 'A', 'Æ': 'AE', 'Ă': 'A', 'Ắ': 'A', 'Ằ': 'A', 'Ẳ': 'A', 'Ẵ': 'A', 'Ặ': 'A',
-    'Ấ': 'A', 'Ầ': 'A', 'Ẩ': 'A', 'Ẫ': 'A', 'Ậ': 'A',
-    'Đ': 'D',
-    'È': 'E', 'É': 'E', 'Ê': 'E', 'Ẽ': 'E', 'Ë': 'E',
-    'Ề': 'E', 'Ế': 'E', 'Ễ': 'E', 'Ể': 'E', 'Ệ': 'E',
-    'Ì': 'I', 'Í': 'I', 'Ĩ': 'I', 'Ï': 'I',
-    'Ỉ': 'I', 'Ị': 'I',
-    'Ồ': 'O', 'Ố': 'O', 'Ỗ': 'O', 'Ổ': 'O', 'Ộ': 'O',
-    'Ò': 'O', 'Ó': 'O', 'Ô': 'O', 'Õ': 'O', 'Ö': 'O',
-    'Ờ': 'O', 'Ớ': 'O', 'Ỡ': 'O', 'Ở': 'O', 'Ợ': 'O',
-    'Ơ': 'O', 'Ớ': 'O', 'Ờ': 'O', 'Ở': 'O', 'Ỡ': 'O', 'Ợ': 'O',
-    'Ù': 'U', 'Ú': 'U', 'Ũ': 'U', 'Ụ': 'U',
-    'Ủ': 'U', 'Ứ': 'U', 'Ừ': 'U', 'Ử': 'U', 'Ữ': 'U', 'Ự': 'U',
-    'Ỳ': 'Y', 'Ý': 'Y', 'Ỹ': 'Y', 'Ỷ': 'Y', 'Ỵ': 'Y',
-    'à': 'a', 'á': 'a', 'â': 'a', 'ã': 'a', 'ä': 'a', 'å': 'a', 'æ': 'ae',
-    'ă': 'a', 'ắ': 'a', 'ằ': 'a', 'ẳ': 'a', 'ẵ': 'a', 'ặ': 'a',
-    'ấ': 'a', 'ầ': 'a', 'ẩ': 'a', 'ẫ': 'a', 'ậ': 'a',
-    'đ': 'd',
-    'è': 'e', 'é': 'e', 'ê': 'e', 'ẽ': 'e', 'ë': 'e',
-    'ề': 'e', 'ế': 'e', 'ễ': 'e', 'ể': 'e', 'ệ': 'e',
-    'ì': 'i', 'í': 'i', 'ĩ': 'i', 'ï': 'i',
-    'ỉ': 'i', 'ị': 'i',
-    'ò': 'o', 'ó': 'o', 'ô': 'o', 'õ': 'o', 'ö': 'o',
-    'ồ': 'o', 'ố': 'o', 'ỗ': 'o', 'ổ': 'o', 'ộ': 'o',
-    'ờ': 'o', 'ớ': 'o', 'ỡ': 'o', 'ở': 'o', 'ợ': 'o',
-    'ơ': 'o', 'ớ': 'o', 'ờ': 'o', 'ở': 'o', 'ỡ': 'o', 'ợ': 'o',
-    'ù': 'u', 'ú': 'u', 'ũ': 'u', 'ụ': 'u',
-    'ủ': 'u', 'ứ': 'u', 'ừ': 'u', 'ử': 'u', 'ữ': 'u', 'ự': 'u',
-    'ỳ': 'y', 'ý': 'y', 'ỹ': 'y', 'ỷ': 'y', 'ỵ': 'y',
-
-    # Devanagari
-    'अ': 'a', 'आ': 'aa', 'इ': 'i', 'ई': 'ii', 'उ': 'u', 'ऊ': 'uu', 'ए': 'e', 'ऐ': 'ai', 'ओ': 'o', 'औ': 'au', 'क': 'ka', 'ख': 'kha', 'ग': 'ga', 'घ': 'gha', 'ङ': 'nga', 'च': 'cha',
-    'छ': 'chha', 'ज': 'ja', 'झ': 'jha', 'ञ': 'nya', 'ट': 'ta', 'ठ': 'tha', 'ड': 'da', 'ढ': 'dha', 'ण': 'na', 'त': 'ta', 'थ': 'tha', 'द': 'da', 'ध': 'dha', 'न': 'na', 'प': 'pa',
-    'फ': 'pha', 'ब': 'ba', 'भ': 'bha', 'म': 'ma', 'य': 'ya', 'र': 'ra', 'ल': 'la', 'व': 'va', 'श': 'sha', 'ष': 'ssha', 'स': 'sa', 'ह': 'ha', 'ा': 'a',
-
-}
@@ -1,22 +0,0 @@
-# 16.05.24
-
-currency_mapping = {
-    '₿': 'bitcoin',
-    '₵': 'cedi',
-    '¢': 'cent',
-    '₡': 'colon',
-    '₢': 'cruzeiro',
-    '₫': 'dong',
-    '֏': 'dram',
-    '€': 'euro',
-    '₯': 'drachma',
-    '₣': 'franc',
-    '₤': 'lira',
-    '₾': 'lari',
-    '₨': 'rupee',
-    '৳': 'taka',
-    '₮': 'tugrik',
-    '₩': 'won',
-    '¥': 'yen',
-    '₴': 'hryvnia'
-}
@@ -1,9 +0,0 @@
-# 16.05.24
-
-geometry_mapping = {
-    '∠': 'angle',
-    '∟': 'right angle',
-    '∥': 'parallel',
-    '⊥': 'perpendicular',
-    '∥': 'parallel to'
-}
@@ -1,8 +0,0 @@
-# 16.05.24
-
-math_symbols_mapping = {
-    '≡': '===', '≠': '!=', '≈': '~~', '≪': '<<', '≫': '>>',
-    '⊂': 'sub', '⊃': 'sup', '⊆': 'subeq', '⊇': 'supeq',
-    '∪': 'U', '∩': 'n', '∅': 'empty', '∈': 'in', '∉': 'notin', '∀': 'forall', '∃': 'exists',
-    '⊥': 'perp', '∠': 'angle', '∟': 'langle', '∣': 'vert', '∴': 'therefore'
-}
@@ -1,12 +0,0 @@
-# 16.05.24
-
-misc_symbols_mapping = {
-    '↻': 'clockwise',
-    '↺': 'counterclockwise',
-    '⇒': 'implies',
-    '⇐': 'is implied by',
-    '⇑': 'upward',
-    '⇓': 'downward',
-    '∽': 'similar to',
-    '∼': 'approximately'
-}
@@ -1,5 +0,0 @@
-# 16.05.24
-
-miscellaneous_symbols_mapping = {
-    'ộ': 'o',
-}
@@ -1,6 +0,0 @@
-# 16.05.24
-
-quantifiers_mapping = {
-    '∀': 'for all',
-    '∃': 'there exists'
-}
@@ -1,11 +0,0 @@
-# 16.05.24
-
-symbols_mapping = {
-    '→': '-', '←': '-', '↑': '^', '↓': 'v',
-    '↔': '<->', '↕': '||', '⇄': '<=>', '⇅': '||',
-    '°': 'deg', '±': '+/-', 'µ': 'u', '×': 'x', '÷': '/',
-    '≤': '<=', '≥': '>=', '≠': '!=', '≈': '~=', '∑': 'sum', '∏': 'prod',
-    '√': 'sqrt', '∞': 'inf', '∫': 'int', '∇': 'grad', '∂': 'd', '∴': 'therefore', '∵': 'because',
-    '¬': 'not', '∧': 'and', '∨': 'or', '⊕': 'xor', '⊗': 'times',
-    'α': 'alpha', 'β': 'beta', 'γ': 'gamma', 'δ': 'delta', 'ε': 'epsilon', 'ζ': 'zeta', 'η': 'eta', 'θ': 'theta', 'ι': 'iota', 'κ': 'kappa', 'λ': 'lambda', 'μ': 'mu', 'ν': 'nu', 'ξ': 'xi', 'ο': 'omicron', 'π': 'pi', 'ρ': 'rho', 'σ': 'sigma', 'τ': 'tau', 'υ': 'upsilon', 'φ': 'phi', 'χ': 'chi', 'ψ': 'psi', 'ω': 'omega'
-}
@@ -1,16 +0,0 @@
-# 16.05.24
-
-units_of_measurement_mapping = {
-    '℃': 'degrees Celsius',
-    '℉': 'degrees Fahrenheit',
-    '°C': 'degrees Celsius',
-    '°F': 'degrees Fahrenheit',
-    'm²': 'square meters',
-    'm³': 'cubic meters',
-    'cm²': 'square centimeters',
-    'cm³': 'cubic centimeters',
-    'mm²': 'square millimeters',
-    'mm³': 'cubic millimeters',
-    'km²': 'square kilometers',
-    'km³': 'cubic kilometers',
-}
@@ -325,6 +325,9 @@ def format_size(size_bytes: float) -> str:
         str: The formatted size.
     """

+    if size_bytes <= 0:
+        return "0B"
+
     units = ['B', 'KB', 'MB', 'GB', 'TB']
     unit_index = 0

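
Note: the added guard means format_size now short-circuits to "0B" for zero or negative inputs before the unit loop runs. A minimal sketch of the function under that assumption (the loop body is a plausible reconstruction, not taken from the diff):

    def format_size(size_bytes: float) -> str:
        if size_bytes <= 0:
            return "0B"

        units = ['B', 'KB', 'MB', 'GB', 'TB']
        unit_index = 0
        # Assumed loop: step up one unit per factor of 1024.
        while size_bytes >= 1024 and unit_index < len(units) - 1:
            size_bytes /= 1024
            unit_index += 1
        return f"{size_bytes:.2f} {units[unit_index]}"

    print(format_size(0))          # 0B
    print(format_size(2_500_000))  # 2.38 MB
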
@@ -1,31 +0,0 @@
-# 15.05.24
-
-# Fix import
-import sys
-import os
-src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-sys.path.append(src_path)
-
-
-# Import
-from Src.Lib.Unidecode import transliterate
-from Src.Util.file_validator import can_create_file
-import unittest
-
-
-class TestTransliterate(unittest.TestCase):
-    def test_transliterate(self):
-
-        # Data test
-        string_data = "Il caffè è un'esperienza, così come il gelato."
-        expected_result = "Il caffe e un'esperienza, cosi come il gelato."
-
-        # Call the function
-        result = transliterate(string_data)
-
-        # Assert
-        self.assertEqual(result, expected_result)
-
-
-if __name__ == '__main__':
-    unittest.main()
@@ -24,6 +24,7 @@
     "use_codec": false,
     "use_gpu": false,
     "default_preset": "ultrafast",
+    "check_output_conversion": false,
     "cleanup_tmp_folder": true,
     "specific_list_audio": ["ita"],
     "specific_list_subtitles": ["eng"]
@@ -1,4 +1,4 @@
 bs4
 tqdm
 rich
 unidecode