feat: signal handler is now agent-specific instead of global

This commit is contained in:
martin legrand 2025-04-06 19:46:52 +02:00
parent 26e9dbcd40
commit bb67df8f42
5 changed files with 9 additions and 9 deletions

View File

@ -1,7 +1,6 @@
#!/usr/bin python3 #!/usr/bin python3
import sys import sys
import signal
import argparse import argparse
import configparser import configparser
@ -17,12 +16,7 @@ warnings.filterwarnings("ignore")
config = configparser.ConfigParser() config = configparser.ConfigParser()
config.read('config.ini') config.read('config.ini')
def handleInterrupt(signum, frame):
sys.exit(0)
def main(): def main():
signal.signal(signal.SIGINT, handler=handleInterrupt)
pretty_print("Initializing...", color="status") pretty_print("Initializing...", color="status")
provider = Provider(provider_name=config["MAIN"]["provider_name"], provider = Provider(provider_name=config["MAIN"]["provider_name"],
model=config["MAIN"]["provider_model"], model=config["MAIN"]["provider_model"],

View File

@ -41,6 +41,7 @@ def setup():
def get_updated_sentence(): def get_updated_sentence():
if not generator: if not generator:
return jsonify({"error": "Generator not initialized"}), 405 return jsonify({"error": "Generator not initialized"}), 405
print(generator.get_status())
return generator.get_status() return generator.get_status()
if __name__ == '__main__': if __name__ == '__main__':

View File

@ -296,7 +296,7 @@ class BrowserAgent(Agent):
prompt = self.make_newsearch_prompt(user_prompt, search_result) prompt = self.make_newsearch_prompt(user_prompt, search_result)
unvisited = [None] unvisited = [None]
while not complete: while not complete:
answer, reasoning = self.llm_decide(prompt, show_reasoning = True) answer, reasoning = self.llm_decide(prompt, show_reasoning = False)
extracted_form = self.extract_form(answer) extracted_form = self.extract_form(answer)
if len(extracted_form) > 0: if len(extracted_form) > 0:

View File

@ -51,7 +51,7 @@ class CoderAgent(Agent):
self.wait_message(speech_module) self.wait_message(speech_module)
answer, reasoning = self.llm_request() answer, reasoning = self.llm_request()
if clarify_trigger in answer: if clarify_trigger in answer:
return answer.replace(clarify_trigger, ""), reasoning return answer, reasoning
if not "```" in answer: if not "```" in answer:
self.last_answer = answer self.last_answer = answer
break break

View File

@ -162,6 +162,7 @@ class AgentRouter:
("Search my drive for a file called vacation_photos_2023.jpg.", "files"), ("Search my drive for a file called vacation_photos_2023.jpg.", "files"),
("Help me organize my desktop files into folders by type.", "files"), ("Help me organize my desktop files into folders by type.", "files"),
("Whats your favorite movie and why?", "talk"), ("Whats your favorite movie and why?", "talk"),
("what directory are you in ?", "files"),
("Search my drive for a file named budget_2024.xlsx", "files"), ("Search my drive for a file named budget_2024.xlsx", "files"),
("Write a Python function to sort a list of dictionaries by key", "code"), ("Write a Python function to sort a list of dictionaries by key", "code"),
("Find the latest updates on quantum computing on the web", "web"), ("Find the latest updates on quantum computing on the web", "web"),
@ -330,7 +331,11 @@ class AgentRouter:
Returns: Returns:
str: The estimated complexity str: The estimated complexity
""" """
predictions = self.complexity_classifier.predict(text) try:
predictions = self.complexity_classifier.predict(text)
except Exception as e:
pretty_print(f"Error in estimate_complexity: {str(e)}", color="failure")
return "LOW"
predictions = sorted(predictions, key=lambda x: x[1], reverse=True) predictions = sorted(predictions, key=lambda x: x[1], reverse=True)
if len(predictions) == 0: if len(predictions) == 0:
return "LOW" return "LOW"