Mirror of https://github.com/tcsenpai/agenticSeek.git, synced 2025-06-06 11:05:26 +00:00

feat : multilingual agent router

Commit 704509560a (parent 5992fdd659)
Five binary image files changed (contents not shown); previous sizes: 129 KiB, 825 KiB, 103 KiB, 182 KiB, 344 KiB.
@@ -187,7 +187,7 @@ class BrowserAgent(Agent):
     def conclude_prompt(self, user_query: str) -> str:
         annotated_notes = [f"{i+1}: {note.lower().replace('note:', '')}" for i, note in enumerate(self.notes)]
         search_note = '\n'.join(annotated_notes)
-        print("AI research notes:\n", search_note)
+        pretty_print(f"AI notes:\n{search_note}", color="success")
         return f"""
 Following a human request:
 {user_query}
@@ -275,7 +275,6 @@ class Browser:
     def get_form_inputs(self) -> List[str]:
         """Extract all input from the page and return them."""
         try:
-            #input_elements = self.driver.find_elements(By.TAG_NAME, "input")
             input_elements = self.find_all_inputs()
             if not input_elements:
                 return ["No input forms found on the page."]
@@ -409,7 +408,7 @@ class Browser:
             self.driver.execute_script(
                 "window.scrollTo(0, document.body.scrollHeight);"
             )
-            time.sleep(1)  # Wait for scroll to complete
+            time.sleep(1)
             return True
         except Exception as e:
             self.logger.error(f"Error scrolling: {str(e)}")
@@ -1,17 +1,35 @@
 from typing import List, Tuple, Type, Dict, Tuple
-import langid
 import re
+import langid
 import nltk
 from nltk.sentiment.vader import SentimentIntensityAnalyzer
+from transformers import MarianMTModel, MarianTokenizer
+
+from sources.utility import pretty_print, animate_thinking

 class LanguageUtility:
     """LanguageUtility for language, or emotion identification"""
     def __init__(self):
+        self.sid = None
+        self.translators_tokenizer = None
+        self.translators_model = None
+        self.load_model()
+
+    def load_model(self) -> None:
+        animate_thinking("Loading language utility...", color="status")
         try:
             nltk.data.find('vader_lexicon')
         except LookupError:
             nltk.download('vader_lexicon')
         self.sid = SentimentIntensityAnalyzer()
+        self.translators_tokenizer = {
+            "fr": MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en"),
+            "zh": MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
+        }
+        self.translators_model = {
+            "fr": MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-fr-en"),
+            "zh": MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
+        }

     def detect_language(self, text: str) -> str:
         """
@@ -24,6 +42,25 @@ class LanguageUtility:
         lang, score = langid.classify(text)
         return lang

+    def translate(self, text: str, origin_lang: str) -> str:
+        """
+        Translate the given text to English
+        Args:
+            text: string to translate
+            origin_lang: ISO language code
+        Returns: translated str
+        """
+        if origin_lang == "en":
+            return text
+        if origin_lang not in self.translators_tokenizer:
+            pretty_print(f"Language {origin_lang} not supported for translation", color="error")
+            return text
+        tokenizer = self.translators_tokenizer[origin_lang]
+        inputs = tokenizer(text, return_tensors="pt", padding=True)
+        model = self.translators_model[origin_lang]
+        translation = model.generate(**inputs)
+        return tokenizer.decode(translation[0], skip_special_tokens=True)
+
     def detect_emotion(self, text: str) -> str:
         """
         Detect the dominant emotion in the given text
@@ -75,11 +112,12 @@ if __name__ == "__main__":

     test_texts = [
         "I am so happy today!",
-        "Qué tristeza siento ahora",
         "我不要去巴黎",
         "La vie c'est cool"
     ]
     for text in test_texts:
-        print(f"\nAnalyzing: {text}")
+        pretty_print("Analyzing...", color="status")
+        pretty_print(f"Language: {detector.detect_language(text)}", color="status")
         result = detector.analyze(text)
-        print(result)
+        trans = detector.translate(text, result['language'])
+        pretty_print(f"Translation: {trans} - from: {result['language']} - Emotion: {result['emotions']}")
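For context, a minimal usage sketch of the extended LanguageUtility (names taken from the diff above; instantiating it downloads the vader_lexicon and the Helsinki-NLP MarianMT fr/zh models on first run):

    from sources.language import LanguageUtility

    detector = LanguageUtility()               # loads VADER plus the fr/zh translators
    text = "La vie c'est cool"
    lang = detector.detect_language(text)      # e.g. "fr" via langid
    english = detector.translate(text, lang)   # MarianMT fr->en; "en" and unsupported codes pass through unchanged
    print(lang, english)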
@@ -67,9 +67,9 @@ class Memory():

     def load_memory(self, agent_type: str = "casual_agent") -> None:
         """Load the memory from the last session."""
-        pretty_print(f"Loading {agent_type} past memories... ", color="status")
         if self.session_recovered == True:
             return
+        pretty_print(f"Loading {agent_type} past memories... ", color="status")
         save_path = os.path.join(self.conversation_folder, agent_type)
         if not os.path.exists(save_path):
             pretty_print("No memory to load.", color="success")
@@ -8,14 +8,13 @@ from adaptive_classifier import AdaptiveClassifier

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-
 from sources.agents.agent import Agent
 from sources.agents.code_agent import CoderAgent
 from sources.agents.casual_agent import CasualAgent
 from sources.agents.planner_agent import FileAgent
 from sources.agents.browser_agent import BrowserAgent
 from sources.language import LanguageUtility
-from sources.utility import pretty_print
+from sources.utility import pretty_print, animate_thinking, timer_decorator

 class AgentRouter:
     """
@@ -24,13 +23,22 @@ class AgentRouter:
     def __init__(self, agents: list):
         self.agents = agents
         self.lang_analysis = LanguageUtility()
-        self.pipelines = {
-            "bart": pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
-        }
+        self.pipelines = self.load_pipelines()
         self.talk_classifier = self.load_llm_router()
         self.complexity_classifier = self.load_llm_router()
         self.learn_few_shots_tasks()
         self.learn_few_shots_complexity()

+    def load_pipelines(self) -> Dict[str, Type[pipeline]]:
+        """
+        Load the pipelines for the text classification used for routing.
+        returns:
+            Dict[str, Type[pipeline]]: The loaded pipelines
+        """
+        animate_thinking("Loading zero-shot pipeline...", color="status")
+        return {
+            "bart": pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
+        }
+
     def load_llm_router(self) -> AdaptiveClassifier:
         """
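For reference, a minimal sketch of how a zero-shot pipeline like the one returned by load_pipelines() is queried directly; the candidate labels below are illustrative only, the router itself takes them from each agent's role["en"]:

    from transformers import pipeline

    bart = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    out = bart("search the web for the latest news on the tesla stock market",
               candidate_labels=["web search", "coding", "talking"])   # illustrative labels
    print(out["labels"][0], out["scores"][0])                          # top label and its score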
@@ -42,6 +50,7 @@ class AgentRouter:
         """
         path = "../llm_router" if __name__ == "__main__" else "./llm_router"
         try:
+            animate_thinking("Loading LLM router model...", color="status")
             talk_classifier = AdaptiveClassifier.from_pretrained(path)
         except Exception as e:
             raise Exception("Failed to load the routing model. Please run the dl_safetensors.sh script inside llm_router/ directory to download the model.")
@@ -302,25 +311,14 @@ class AgentRouter:
         pretty_print(f"Agent choice -> BART: {bart} ({final_score_bart}) LLM-router: {llm_router} ({final_score_llm})")
         return bart if final_score_bart > final_score_llm else llm_router

-    def classify_text(self, text: str, threshold: float = 0.4) -> list:
-        """
-        Classify the text using the LLM router and BART model.
-        """
+    def find_first_sentence(self, text: str) -> str:
         first_sentence = None
-        lang = "en"
         for line in text.split("\n"):
             first_sentence = line.strip()
             break
         if first_sentence is None:
             first_sentence = text
-        try:
-            lang = self.lang_analysis.detect_language(first_sentence)
-            # no multilanguage support yet
-            labels = [agent.role["en"] for agent in self.agents]
-            result = self.router_vote(first_sentence, labels, log_confidence=False)
-        except Exception as e:
-            raise e
-        return result, lang
+        return first_sentence

     def estimate_complexity(self, text: str) -> str:
         """
@@ -328,7 +326,7 @@ class AgentRouter:
         Args:
             text: The input text
         Returns:
             str: The estimated complexity
         """
         predictions = self.complexity_classifier.predict(text)
         predictions = sorted(predictions, key=lambda x: x[1], reverse=True)
@@ -358,12 +356,6 @@ class AgentRouter:
         pretty_print(f"Error finding planner agent. Please add a planner agent to the list of agents.", color="failure")
         return None

-    def multi_language_message(self, text: str):
-        pretty_print(f"选择代理时出错。路由系统尚不支持多语言", color="failure")
-        pretty_print(f"エージェントの選択エラー。ルーティングシステムはまだ多言語に対応していません", color="failure")
-        pretty_print(f"Erreur lors du choix de l'agent. Le système de routage n'est pas encore multilingue.", color="failure")
-        pretty_print(f"Error al elegir agente. El sistema de enrutamiento aún no es multilingüe.", color="failure")
-
     def select_agent(self, text: str) -> Agent:
         """
         Select the appropriate agent based on the text.
@@ -374,16 +366,21 @@ class AgentRouter:
         """
         if len(self.agents) == 0:
             return self.agents[0]
+        lang = self.lang_analysis.detect_language(text)
+        text = self.find_first_sentence(text)
+        text = self.lang_analysis.translate(text, lang)
+        labels = [agent.role["en"] for agent in self.agents]
         complexity = self.estimate_complexity(text)
-        best_agent, lang = self.classify_text(text)
-        if lang != "en":
-            self.multi_language_message(text)
         if complexity == None:
             pretty_print(f"Humm, the task seem complex but you gave very little information. can you clarify?", color="info")
             return None
-        if complexity == "HIGH" and lang == "en":
+        if complexity == "HIGH":
             pretty_print(f"Complex task detected, routing to planner agent.", color="info")
             return self.find_planner_agent()
+        try:
+            best_agent = self.router_vote(text, labels, log_confidence=False)
+        except Exception as e:
+            raise e
         for agent in self.agents:
             if best_agent == agent.role["en"]:
                 pretty_print(f"Selected agent: {agent.agent_name} (roles: {agent.role[lang]})", color="warning")
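With these changes, select_agent() first detects the request language, keeps only the first sentence, translates it to English, then runs the complexity estimate and the BART/LLM-router vote against the agents' English role labels. A minimal caller sketch (the agents list is the one built in the __main__ block below; the query is one of its test strings, and the expected pick is an assumption, not a guaranteed output):

    router = AgentRouter(agents)
    agent = router.select_agent("Yo, cherche sur internet comment va tesla en bourse.")
    if agent is not None:
        print(agent.agent_name)   # expected to resolve to the web/browser agent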
@@ -393,52 +390,52 @@ class AgentRouter:

 if __name__ == "__main__":
     agents = [
-        CasualAgent("jarvis", "../prompts/casual_agent.txt", None),
-        BrowserAgent("browser", "../prompts/planner_agent.txt", None),
-        CoderAgent("coder", "../prompts/coder_agent.txt", None),
-        FileAgent("file", "../prompts/coder_agent.txt", None)
+        CasualAgent("jarvis", "../prompts/base/casual_agent.txt", None),
+        BrowserAgent("browser", "../prompts/base/planner_agent.txt", None),
+        CoderAgent("coder", "../prompts/base/coder_agent.txt", None),
+        FileAgent("file", "../prompts/base/coder_agent.txt", None)
     ]
     router = AgentRouter(agents)
     texts = [
         "hi",
-        #"你好",
-        #"Bonjour",
+        "你好",
+        "Bonjour",
         "Write a python script to check if the device on my network is connected to the internet",
-        # "Peut tu écrire un script python qui vérifie si l'appareil sur mon réseau est connecté à internet?",
-        # "写一个Python脚本,检查我网络上的设备是否连接到互联网",
+        "Peut tu écrire un script python qui vérifie si l'appareil sur mon réseau est connecté à internet?",
+        "写一个Python脚本,检查我网络上的设备是否连接到互联网",
         "Hey could you search the web for the latest news on the tesla stock market ?",
-        # "嘿,你能搜索网页上关于股票市场的最新新闻吗?",
-        # "Yo, cherche sur internet comment va tesla en bourse.",
+        "嘿,你能搜索网页上关于股票市场的最新新闻吗?",
+        "Yo, cherche sur internet comment va tesla en bourse.",
         "I would like you to search for weather api and then make an app using this API",
-        # "我想让你搜索天气API,然后用这个API做一个应用程序",
-        # "J'aimerais que tu cherche une api météo et que l'utilise pour faire une application",
+        "我想让你搜索天气API,然后用这个API做一个应用程序",
+        "J'aimerais que tu cherche une api météo et que l'utilise pour faire une application",
         "Plan a 3-day trip to New York, including flights and hotels.",
-        # "计划一次为期3天的纽约之旅,包括机票和酒店。",
-        # "Planifie un trip de 3 jours à Paris, y compris les vols et hotels.",
+        "计划一次为期3天的纽约之旅,包括机票和酒店。",
+        "Planifie un trip de 3 jours à Paris, y compris les vols et hotels.",
         "Find on the web the latest research papers on AI.",
-        # "在网上找到最新的人工智能研究论文。",
-        # "Trouve moi les derniers articles de recherche sur l'IA sur internet",
+        "在网上找到最新的人工智能研究论文。",
+        "Trouve moi les derniers articles de recherche sur l'IA sur internet",
         "Help me write a C++ program to sort an array",
         "Tell me what France been up to lately",
-        # "告诉我法国最近在做什么",
-        # "Dis moi ce que la France a fait récemment",
+        "告诉我法国最近在做什么",
+        "Dis moi ce que la France a fait récemment",
         "Who is Sergio Pesto ?",
-        # "谁是Sergio Pesto?",
-        # "Qui est Sergio Pesto ?",
-        # "帮我写一个C++程序来排序数组",
-        # "Aide moi à faire un programme c++ pour trier une array.",
+        "谁是Sergio Pesto?",
+        "Qui est Sergio Pesto ?",
+        "帮我写一个C++程序来排序数组",
+        "Aide moi à faire un programme c++ pour trier une array.",
         "What’s the weather like today? Oh, and can you find a good weather app?",
-        # "今天天气怎么样?哦,你还能找到一个好的天气应用程序吗?",
-        # "La météo est comment aujourd'hui ? oh et trouve moi une bonne appli météo tant que tu y est.",
+        "今天天气怎么样?哦,你还能找到一个好的天气应用程序吗?",
+        "La météo est comment aujourd'hui ? oh et trouve moi une bonne appli météo tant que tu y est.",
         "Can you debug this Java code? It’s not working.",
-        # "你能调试这段Java代码吗?它不起作用。",
-        # "Peut tu m'aider à debugger ce code java, ça marche pas",
-        #"Can you browse the web and find me a 4090 for cheap?",
-        #"你能浏览网页,为我找一个便宜的4090吗?",
-        #"Peut tu chercher sur internet et me trouver une 4090 pas cher ?",
-        #"Hey, can you find the old_project.zip file somewhere on my drive?",
-        #"嘿,你能在我驱动器上找到old_project.zip文件吗?",
-        #"Hé trouve moi le old_project.zip, il est quelque part sur mon disque.",
+        "你能调试这段Java代码吗?它不起作用。",
+        "Peut tu m'aider à debugger ce code java, ça marche pas",
+        "Can you browse the web and find me a 4090 for cheap?",
+        "你能浏览网页,为我找一个便宜的4090吗?",
+        "Peut tu chercher sur internet et me trouver une 4090 pas cher ?",
+        "Hey, can you find the old_project.zip file somewhere on my drive?",
+        "嘿,你能在我驱动器上找到old_project.zip文件吗?",
+        "Hé trouve moi le old_project.zip, il est quelque part sur mon disque.",
         "Tell me a funny story",
         "给我讲一个有趣的故事",
         "Raconte moi une histoire drole"
@@ -28,6 +28,9 @@ unsafe_commands_unix = [
     "parted", # Disk partitioning
     "chroot", # Change root directory
     "route" # Routing table management
+    "--force", # Force flag for many commands
+    "rebase", # Rebase git repository
+    "git ." # Git commands, feel free to remove it but i dont want to risk agenticSeek pushing to its own repo lol (see 56b5db7)
 ]

 unsafe_commands_windows = [
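For reference, a rough sketch of how a blocklist such as unsafe_commands_unix could be applied; this substring scan is an assumption for illustration, not the interpreter's actual check:

    def looks_unsafe(command: str, blocklist: list[str]) -> bool:
        # naive check: flag the command if any blocklisted token appears anywhere in it
        return any(token in command for token in blocklist)

    print(looks_unsafe("git push --force", unsafe_commands_unix))   # True, because "--force" is in the list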