readme update for server install instructions

This commit is contained in:
martin legrand 2025-03-29 12:40:50 +01:00
parent d1b20a1446
commit 4c87353db4
5 changed files with 28 additions and 10 deletions

View File

@@ -218,14 +218,28 @@ Note: For Windows or macOS, use ipconfig or ifconfig respectively to find the IP
**If you wish to use an OpenAI-based provider, follow the *Run with an API* section.**
Make sure Ollama is installed (the server script supports `ollama` and `llamacpp` as backends).
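If Ollama isn't set up yet, the usual install-and-pull steps look like this (a sketch for Linux; the model tag matches the config example further down):
```sh
# Official Ollama install script (Linux); see https://ollama.com for other platforms
curl -fsSL https://ollama.com/install.sh | sh
# Pull the model you intend to serve
ollama pull deepseek-r1:14b
```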
Clone the repository and enter the `server/` folder.
```sh
git clone --depth 1 https://github.com/Fosowl/agenticSeek.git
cd agenticSeek/server/
```
Install the server-specific requirements:
```sh
pip3 install -r requirements.txt
```
Run the server script (this replaces the former `python3 server_ollama.py --model "deepseek-r1:32b"` entry point):
```sh
python3 app.py --provider ollama --port 3333
```
You can choose between `ollama` and `llamacpp` as the LLM service.
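For example, switching the backend only changes the `--provider` flag:
```sh
python3 app.py --provider llamacpp --port 3333
```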
### 2. **Run it**
Now on your personal computer:
@@ -238,7 +252,7 @@ Set the `provider_server_address` to the IP address of the machine that will run
```
is_local = False
provider_name = server
provider_model = deepseek-r1:14b
provider_server_address = x.x.x.x:3333
```
(The server port changed from `5000` to `3333` in this commit.)
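To sanity-check that the client machine can reach the server, you can hit the polling route used by the provider code shown later (`x.x.x.x` stands in for the server's real IP):
```sh
curl http://x.x.x.x:3333/get_updated_sentence
```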
Run the assistant:

View File

@ -1,15 +1,11 @@
#!/usr/bin/env python3

import logging
import argparse

from flask import Flask, jsonify, request

from sources.llamacpp import LlamacppLLM
from sources.ollama import OllamaLLM

log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

parser = argparse.ArgumentParser(description='AgenticSeek server script')
parser.add_argument('--provider', type=str, help='LLM backend library to use. Set to [ollama] or [llamacpp]', required=True)
parser.add_argument('--port', type=int, help='Port to use', required=True)
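The rest of `app.py` is elided from this hunk. Purely as an illustration of how these pieces could fit together — the `/setup` and `/generate` route names and the `state` field names are assumptions; only `/get_updated_sentence` and the `sentence`/`is_complete` JSON keys are confirmed by the client code further down:
```python
args = parser.parse_args()
app = Flask(__name__)

# Select the backend requested on the command line.
generator = OllamaLLM() if args.provider == "ollama" else LlamacppLLM()

@app.route("/setup", methods=["POST"])  # route name is an assumption
def setup():
    generator.set_model(request.json["model"])
    return jsonify({"status": "ok"})

@app.route("/generate", methods=["POST"])  # route name is an assumption
def generate():
    generator.start(request.json["messages"])  # start() is a hypothetical helper
    return jsonify({"status": "generating"})

@app.route("/get_updated_sentence")
def get_updated_sentence():
    # The client polls this route for "sentence" and "is_complete".
    return jsonify({"sentence": generator.state.sentence,        # field names assumed
                    "is_complete": generator.state.is_complete})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=args.port)
```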

View File

@@ -24,6 +24,12 @@ class GeneratorLLM():
        self.model = None
        self.state = GenerationState()
        self.logger = logging.getLogger(__name__)
        handler = logging.StreamHandler()
        handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.setLevel(logging.INFO)

    def set_model(self, model: str) -> None:
        self.logger.info(f"Model set to {model}")

View File

@@ -1,10 +1,10 @@
from .generator import GeneratorLLM
import ollama

class OllamaLLM(GeneratorLLM):
    def __init__(self):
        """
        Handle generation using Ollama.
        """

View File

@@ -116,11 +116,13 @@ class Provider:
        try:
            requests.post(route_setup, json={"model": self.model})
            pretty_print("Setting up server...", color="status")
            time.sleep(5)
            requests.post(route_gen, json={"messages": history})
            is_complete = False
            while not is_complete:
                response = requests.get(f"http://{self.server_ip}/get_updated_sentence")
                data = response.json()  # parse once rather than re-parsing per access
                print("raw response", data)
                if "error" in data:
                    time.sleep(2)  # back off instead of busy-polling on errors
                    continue
                thought = data["sentence"]
                is_complete = bool(data["is_complete"])
                time.sleep(2)
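Putting the client side together, the protocol is: one POST to set the model, one POST to start generation, then GET polling until `is_complete` flips. A self-contained sketch (the setup and generation route names are assumptions carried over from `route_setup`/`route_gen` above):
```python
import time
import requests

SERVER = "x.x.x.x:3333"  # same value as provider_server_address

def ask(messages: list, model: str = "deepseek-r1:14b") -> str:
    requests.post(f"http://{SERVER}/setup", json={"model": model})           # route assumed
    requests.post(f"http://{SERVER}/generate", json={"messages": messages})  # route assumed
    while True:
        data = requests.get(f"http://{SERVER}/get_updated_sentence").json()
        if "error" not in data and bool(data["is_complete"]):
            return data["sentence"]
        time.sleep(2)

print(ask([{"role": "user", "content": "Hello!"}]))
```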