diff --git a/README.md b/README.md
index c91cf8b..05b1903 100644
--- a/README.md
+++ b/README.md
@@ -9,9 +9,10 @@ THIS IS A WORK IN PROGRESS
 - Make sure you have ollama installed on your machine
 - Install dependencies (`pip3 install -r requirements.txt`)
 
-## Run
+## Run fully local
 
 Simplest way is to use ollama
+- First, change the config.ini file to set the provider_name to `ollama` and provider_model to `deepseek-r1:7b`
 - In first terminal run `ollama serve`
 - In second terminal run `python3 main.py`
 - Ollama will download `deepseek-r1:7b` on your machine
@@ -20,6 +21,23 @@ Simplest way is to use ollama
   * For speech to text: `distil-whisper/distil-medium.en`
 - type or say goodbye to exit.
 
+# Run model on another machine
+
+- First, change the config.ini file to set the provider_name to `server` and provider_model to `deepseek-r1:7b` (or higher)
+- On the machine that will run the model, execute the script stream_llm.py
+
+```
+python3 stream_llm.py
+```
+
+- In the config.ini file, set the provider_server_address to the IP address of the machine that will run the model.
+
+- On the machine that will run the assistant, execute main.py
+
+```
+python3 main.py
+```
+
 ## Text to speech
 
 If you want your AI to speak, run with the `--speak` option.
diff --git a/config.ini b/config.ini
index c0fe018..1b04872 100644
--- a/config.ini
+++ b/config.ini
@@ -1,6 +1,6 @@
 [MAIN]
-is_local = False
+is_local = True
 provider_name = ollama
-provider_model = deepseek-r1:32b
-provider_server_address = 127.0.0.1:6666
+provider_model = deepseek-r1:7b
+provider_server_address = 127.0.0.1:5000
 agent_name = jarvis
diff --git a/sources/tools/tools.py b/sources/tools/tools.py
index 9c6d2ba..dc1eeae 100644
--- a/sources/tools/tools.py
+++ b/sources/tools/tools.py
@@ -92,7 +92,7 @@ class Tools():
         save_path = None
 
         if start_tag not in llm_text:
-            return None
+            return None, None
 
         while True:
             start_pos = llm_text.find(start_tag, start_index)
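
Note on the `sources/tools/tools.py` hunk: the patched method builds a result pair (the parsed blocks plus `save_path`), so its early return must also yield two values. Below is a minimal sketch of why; the function name `extract_blocks`, its signature, and the caller are hypothetical stand-ins for illustration, not the repository's actual code.

```python
# Minimal sketch (not the real Tools method): illustrates why the early
# return in the patch is `return None, None` instead of `return None`.

def extract_blocks(llm_text: str, start_tag: str = "```") -> tuple:
    """Hypothetical stand-in returning (blocks, save_path) parsed from an LLM reply."""
    save_path = None
    if start_tag not in llm_text:
        # A bare `return None` breaks callers that unpack two values
        # (TypeError: cannot unpack non-iterable NoneType object).
        return None, None
    blocks = []
    # ... block-parsing loop elided for brevity ...
    return blocks, save_path

# Caller-side unpacking now works for both the "found" and "not found" cases:
blocks, save_path = extract_blocks("no code fence in this text")
print(blocks, save_path)  # None None
```

Returning a `(None, None)` sentinel keeps every call site as a simple two-value unpack, with no special-casing of the "no tag found" path.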