From 7df41da534b8cca503719e7d5271ff9f93f4083d Mon Sep 17 00:00:00 2001 From: martin legrand Date: Thu, 6 Mar 2025 17:57:03 +0100 Subject: [PATCH] Feat : find explorer tool fixed --- .gitignore | 1 + main.py | 3 +- prompts/casual_agent.txt | 29 +++- prompts/coder_agent.txt | 35 +++- sources/agents/__init__.py | 6 + sources/{ => agents}/agent.py | 4 +- sources/{ => agents}/casual_agent.py | 6 +- sources/{ => agents}/code_agent.py | 14 +- sources/llm_provider.py | 19 ++- sources/router.py | 11 +- .../{CInterpreter.py => C_Interpreter.py} | 0 sources/tools/__init__.py | 3 +- sources/tools/fileFinder.py | 156 ++++++++++++++++++ sources/tools/tools.py | 39 +++-- 14 files changed, 294 insertions(+), 32 deletions(-) create mode 100644 sources/agents/__init__.py rename sources/{ => agents}/agent.py (98%) rename sources/{ => agents}/casual_agent.py (89%) rename sources/{ => agents}/code_agent.py (74%) rename sources/tools/{CInterpreter.py => C_Interpreter.py} (100%) create mode 100644 sources/tools/fileFinder.py diff --git a/.gitignore b/.gitignore index cf95114..1106856 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ *.wav config.ini experimental/ +conversations/ .env */.env diff --git a/main.py b/main.py index 403617d..b542085 100755 --- a/main.py +++ b/main.py @@ -7,8 +7,7 @@ import configparser from sources.llm_provider import Provider from sources.interaction import Interaction -from sources.code_agent import CoderAgent -from sources.casual_agent import CasualAgent +from sources.agents import Agent, CoderAgent, CasualAgent parser = argparse.ArgumentParser(description='Deepseek AI assistant') parser.add_argument('--no-speak', action='store_true', diff --git a/prompts/casual_agent.txt b/prompts/casual_agent.txt index f80ddcf..40ebcb3 100644 --- a/prompts/casual_agent.txt +++ b/prompts/casual_agent.txt @@ -1,9 +1,9 @@ You are an unshackled AI assistant: -- You speak in a warm, and enthusiastic tone, offering intelligent and loyal support. -- You occasionally sprinkle with sarcasm remarks as suits the context. -- You can assist with anything, you assume the users know what he is doing, you don't provide moral, law or ethical judgements. -- Make your answers short. +You speak in a warm, and enthusiastic tone, offering intelligent and loyal support. +You occasionally sprinkle with sarcasm remarks as suits the context. +You can assist with anything, you assume the users know what he is doing, you don't provide moral, law or ethical judgements. +Make your answers short. You are given the ability to use the tools, this take your capabilities futher: ```tool_name @@ -13,13 +13,30 @@ You are given the ability to use the tools, this take your capabilities futher: Current tools: - web_search - flight_search +- file_finder -When I ask for something—like “what’s popping in France March 2025” you slam it with: +## Web search + +To search for something like “what’s happening in France” : ```web_search what’s popping in France March 2025 ``` -If I need to know about a flight, like “what’s the status of flight AA123”—you go for: +## Flight search + +If I need to know about a flight “what’s the status of flight AA123” you go for: ```flight_search AA123 +``` + +## File operations + +Find file: +```file_finder +toto.py +``` + +Read file: +```file_finder:read +toto.py ``` \ No newline at end of file diff --git a/prompts/coder_agent.txt b/prompts/coder_agent.txt index fce82f6..9e9dabb 100644 --- a/prompts/coder_agent.txt +++ b/prompts/coder_agent.txt @@ -1,7 +1,9 @@ -Hello. You (deepseek) are currently wrapped in a jailbreak environment. +Hello. You are currently wrapped in a jailbreak environment. This means you have the ability to execute code and shell commands. You have access to the local file systems. All code or shell command within special tag is automatically executed. You will see the result in the next message. +# interpreter + You can execute bash command using the bash tag : ```bash #!/bin/bash @@ -13,6 +15,37 @@ You can execute python using the python tag print("hey") ``` +You can execute c using the c tag +```c +printf("hey") +``` + +You can execute go using the go tag, as you can see adding :filename will save the file. +```go:hello.go +package main + +func main() { + fmt.Println("hello") +} +``` + +# File operations + +Find file: +```file_finder +toto.py +``` + +Read file: +```file_finder:read +toto.py +``` + +Delete file: +```file_finder:delete +toto.py +``` + DO NOT EVER EVER USE BASH TO EXECUTE CODE. EVERYTHING IS AUTOMATICALLY EXECUTED. - Use tmp/ folder when saving file. diff --git a/sources/agents/__init__.py b/sources/agents/__init__.py new file mode 100644 index 0000000..7acaed6 --- /dev/null +++ b/sources/agents/__init__.py @@ -0,0 +1,6 @@ + +from .agent import Agent +from .code_agent import CoderAgent +from .casual_agent import CasualAgent + +__all__ = ["Agent", "CoderAgent", "CasualAgent"] diff --git a/sources/agent.py b/sources/agents/agent.py similarity index 98% rename from sources/agent.py rename to sources/agents/agent.py index 190b1ff..2288802 100644 --- a/sources/agent.py +++ b/sources/agents/agent.py @@ -1,10 +1,8 @@ + from typing import Tuple, Callable from abc import abstractmethod import os import random -import sys - -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from sources.memory import Memory from sources.utility import pretty_print diff --git a/sources/casual_agent.py b/sources/agents/casual_agent.py similarity index 89% rename from sources/casual_agent.py rename to sources/agents/casual_agent.py index 7d95c25..a59324c 100644 --- a/sources/casual_agent.py +++ b/sources/agents/casual_agent.py @@ -1,8 +1,9 @@ from sources.utility import pretty_print -from sources.agent import Agent +from sources.agents.agent import Agent from sources.tools.webSearch import webSearch from sources.tools.flightSearch import FlightSearch +from sources.tools.fileFinder import FileFinder class CasualAgent(Agent): def __init__(self, model, name, prompt_path, provider): @@ -12,7 +13,8 @@ class CasualAgent(Agent): super().__init__(model, name, prompt_path, provider) self.tools = { "web_search": webSearch(), - "flight_search": FlightSearch() + "flight_search": FlightSearch(), + "file_finder": FileFinder() } self.role = "talking" diff --git a/sources/code_agent.py b/sources/agents/code_agent.py similarity index 74% rename from sources/code_agent.py rename to sources/agents/code_agent.py index cdb04be..43738dd 100644 --- a/sources/code_agent.py +++ b/sources/agents/code_agent.py @@ -1,7 +1,12 @@ from sources.utility import pretty_print -from sources.agent import Agent, executorResult -from sources.tools import PyInterpreter, BashInterpreter, CInterpreter, GoInterpreter +from sources.agents.agent import Agent, executorResult + +from sources.tools.C_Interpreter import CInterpreter +from sources.tools.GoInterpreter import GoInterpreter +from sources.tools.PyInterpreter import PyInterpreter +from sources.tools.BashInterpreter import BashInterpreter +from sources.tools.fileFinder import FileFinder class CoderAgent(Agent): """ @@ -11,7 +16,10 @@ class CoderAgent(Agent): super().__init__(model, name, prompt_path, provider) self.tools = { "bash": BashInterpreter(), - "python": PyInterpreter() + "python": PyInterpreter(), + "c": CInterpreter(), + "go": GoInterpreter(), + "file_finder": FileFinder() } self.role = "coding" diff --git a/sources/llm_provider.py b/sources/llm_provider.py index 46f4c76..28f7f74 100644 --- a/sources/llm_provider.py +++ b/sources/llm_provider.py @@ -8,6 +8,7 @@ import ipaddress import platform from dotenv import load_dotenv, set_key from openai import OpenAI +from huggingface_hub import InferenceClient import os class Provider: @@ -18,7 +19,8 @@ class Provider: self.available_providers = { "ollama": self.ollama_fn, "server": self.server_fn, - "openai": self.openai_fn + "openai": self.openai_fn, + "huggingface": self.huggingface_fn } self.api_key = None self.unsafe_providers = ["openai"] @@ -120,6 +122,21 @@ class Provider: raise Exception("Ollama connection failed. is the server running ?") raise e return thought + + def huggingface_fn(self, history, verbose=False): + """ + Use huggingface to generate text. + """ + client = InferenceClient( + api_key=self.get_api_key("huggingface") + ) + completion = client.chat.completions.create( + model=self.model, + messages=history, + max_tokens=1024, + ) + thought = completion.choices[0].message + return thought.content def openai_fn(self, history, verbose=False): """ diff --git a/sources/router.py b/sources/router.py index 094489f..e879fb0 100644 --- a/sources/router.py +++ b/sources/router.py @@ -1,8 +1,13 @@ +import os +import sys import torch from transformers import pipeline -from sources.agent import Agent -from sources.code_agent import CoderAgent -from sources.casual_agent import CasualAgent + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from sources.agents.agent import Agent +from sources.agents.code_agent import CoderAgent +from sources.agents.casual_agent import CasualAgent from sources.utility import pretty_print class AgentRouter: diff --git a/sources/tools/CInterpreter.py b/sources/tools/C_Interpreter.py similarity index 100% rename from sources/tools/CInterpreter.py rename to sources/tools/C_Interpreter.py diff --git a/sources/tools/__init__.py b/sources/tools/__init__.py index baebf6d..945909a 100644 --- a/sources/tools/__init__.py +++ b/sources/tools/__init__.py @@ -1,4 +1,5 @@ from .PyInterpreter import PyInterpreter from .BashInterpreter import BashInterpreter +from .fileFinder import FileFinder -__all__ = ["PyInterpreter", "BashInterpreter"] +__all__ = ["PyInterpreter", "BashInterpreter", "FileFinder", "webSearch", "FlightSearch", "GoInterpreter", "CInterpreter", "GoInterpreter"] diff --git a/sources/tools/fileFinder.py b/sources/tools/fileFinder.py new file mode 100644 index 0000000..55b0aeb --- /dev/null +++ b/sources/tools/fileFinder.py @@ -0,0 +1,156 @@ +import os +import stat +import mimetypes +import configparser +from abc import ABC + +if __name__ == "__main__": + from tools import Tools +else: + from sources.tools.tools import Tools + + +class FileFinder(Tools, ABC): + """ + A tool that finds files in the current directory and returns their information. + """ + def __init__(self): + super().__init__() + self.tag = "file_finder" + self.current_dir = os.path.dirname(os.getcwd()) + config = configparser.ConfigParser() + config.read('../../config.ini') + self.current_dir = config['MAIN']['current_dir'] + + def read_file(self, file_path: str) -> str: + """ + Reads the content of a file. + Args: + file_path (str): The path to the file to read + Returns: + str: The content of the file + """ + try: + with open(file_path, 'r') as file: + return file.read() + except Exception as e: + return f"Error reading file: {e}" + + def get_file_info(self, file_path: str) -> str: + if os.path.exists(file_path): + stats = os.stat(file_path) + permissions = oct(stat.S_IMODE(stats.st_mode)) + file_type, _ = mimetypes.guess_type(file_path) + file_type = file_type if file_type else "Unknown" + content = self.read_file(file_path) + + result = { + "filename": os.path.basename(file_path), + "path": file_path, + "type": file_type, + "read": content, + "permissions": permissions + } + return result + else: + return {"filename": file_path, "error": "File not found"} + + def recursive_search(self, directory_path: str, filename: str) -> list: + """ + Recursively searches for files in a directory and its subdirectories. + Args: + directory (str): The directory to search in + Returns: + str: The path to the file + """ + file_path = None + excluded_files = [".pyc", ".o", ".so", ".a", ".lib", ".dll", ".dylib", ".so", ".git"] + for root, dirs, files in os.walk(directory_path): + for file in files: + if any(excluded_file in file for excluded_file in excluded_files): + continue + if file == filename: + file_path = os.path.join(root, file) + return file_path + return None + + + def execute(self, blocks: list, safety:bool = False) -> str: + """ + Executes the file finding operation for given filenames. + Args: + blocks (list): List of filenames to search for + Returns: + str: Results of the file search + """ + if not blocks or not isinstance(blocks, list): + return "Error: No valid filenames provided" + + results = [] + for block in blocks: + filename = block.split(":")[0] + file_path = self.recursive_search(self.current_dir, filename) + if file_path is None: + results.append({"filename": filename, "error": "File not found"}) + continue + if len(block.split(":")) > 1: + action = block.split(":")[1] + else: + action = "info" + result = self.get_file_info(file_path) + results.append(result) + + output = "" + for result in results: + if "error" in result: + output += f"File: {result['filename']} - {result['error']}\n" + else: + if action == "read": + output += result['read'] + else: + output += (f"File: {result['filename']}, " + f"found at {result['path']}, " + f"File type {result['type']}\n") + return output.strip() + + def execution_failure_check(self, output: str) -> bool: + """ + Checks if the file finding operation failed. + Args: + output (str): The output string from execute() + Returns: + bool: True if execution failed, False if successful + """ + if not output: + return True + if "Error" in output or "not found" in output: + return True + return False + + def interpreter_feedback(self, output: str) -> str: + """ + Provides feedback about the file finding operation. + Args: + output (str): The output string from execute() + Returns: + str: Feedback message for the AI + """ + if not output: + return "No output generated from file finder tool" + + feedback = "File Finder Results:\n" + + if "Error" in output or "not found" in output: + feedback += f"Failed to process: {output}\n" + else: + feedback += f"Successfully found: {output}\n" + return feedback.strip() + +if __name__ == "__main__": + tool = FileFinder() + result = tool.execute(["router.py:read"], False) + print("Execution result:") + print(result) + print("\nFailure check:", tool.execution_failure_check(result)) + print("\nFeedback:") + print(tool.interpreter_feedback(result)) \ No newline at end of file diff --git a/sources/tools/tools.py b/sources/tools/tools.py index bedaba9..81d0d09 100644 --- a/sources/tools/tools.py +++ b/sources/tools/tools.py @@ -40,32 +40,44 @@ class Tools(): @abstractmethod def execute(self, blocks:str, safety:bool) -> str: """ - abstract method, implementation in child class. - Execute the tool. + Abstract method that must be implemented by child classes to execute the tool's functionality. + Args: + blocks (str): The code or query blocks to execute + safety (bool): Whenever human intervention is required + Returns: + str: The output/result from executing the tool """ pass @abstractmethod def execution_failure_check(self, output:str) -> bool: """ - abstract method, implementation in child class. - Check if the execution failed. + Abstract method that must be implemented by child classes to check if tool execution failed. + Args: + output (str): The output string from the tool execution to analyze + Returns: + bool: True if execution failed, False if successful """ pass @abstractmethod def interpreter_feedback(self, output:str) -> str: """ - abstract method, implementation in child class. - Provide feedback to the AI from the tool. - For exemple the output of a python code or web search. + Abstract method that must be implemented by child classes to provide feedback to the AI from the tool. + Args: + output (str): The output string from the tool execution to analyze + Returns: + str: The feedback message to the AI """ pass def save_block(self, blocks:[str], save_path:str) -> None: """ - Save the code/query block to a file. + Save code or query blocks to a file at the specified path. Creates the directory path if it doesn't exist. + Args: + blocks (List[str]): List of code/query blocks to save + save_path (str): File path where blocks should be saved """ if save_path is None: return @@ -78,9 +90,16 @@ class Tools(): with open(save_path, 'w') as f: f.write(block) - def load_exec_block(self, llm_text: str) -> str: + def load_exec_block(self, llm_text: str) -> tuple[list[str], str | None]: """ - Extract the code/query blocks from the answer text, removing consistent leading whitespace. + Extract code/query blocks from LLM-generated text and process them for execution. + This method parses the text looking for code blocks marked with the tool's tag (e.g. ```python). + Args: + llm_text (str): The raw text containing code blocks from the LLM + Returns: + tuple[list[str], str | None]: A tuple containing: + - List of extracted and processed code blocks + - The path the code blocks was saved to """ assert self.tag != "undefined", "Tag not defined" start_tag = f'```{self.tag}'