Mirror of https://github.com/tcsenpai/agenticSeek.git
feat : support for local openai based api
This commit is contained in:
parent 697bc882c7
commit 86da6acf3f
README.md (18 changed lines)
@@ -222,7 +222,11 @@ ip a | grep "inet " | grep -v 127.0.0.1 | awk '{print $2}' | cut -d/ -f1
Note: For Windows or macOS, use ipconfig or ifconfig respectively to find the IP address.

Clone the repository and then run the script `stream_llm.py` in `server/`.
**If you wish to use an openai-based provider (e.g. a local vLLM server), follow the *Run with an API* section.**
Make sure ollama is installed (currently our script only supports ollama).

Run our server script:

```sh
python3 server_ollama.py --model "deepseek-r1:32b"
```
@@ -232,8 +236,6 @@ python3 server_ollama.py --model "deepseek-r1:32b"
Now on your personal computer:

Clone the repository.

Change the `config.ini` file to set the `provider_name` to `server` and `provider_model` to `deepseek-r1:14b`.
Set the `provider_server_address` to the IP address of the machine that will run the model.
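A minimal sketch of what that might look like in `config.ini`; the IP address and port are placeholders you must replace with your own server's address, and any keys not shown here stay as they are:

```sh
[MAIN]
provider_name = server
provider_model = deepseek-r1:14b
provider_server_address = 192.168.1.100:5000
```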
@@ -254,18 +256,20 @@ python3 main.py
## **Run with an API**

Clone the repository.

Set the desired provider in the `config.ini`:
```sh
[MAIN]
is_local = False
provider_name = openai
provider_model = gpt-4o
provider_server_address = 127.0.0.1:5000
```
Set `is_local` to True if you are using a local openai-based API.

Change the IP address if your openai-based API runs on your own server.
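For example, a hypothetical local setup, assuming an OpenAI-compatible server is listening on your machine; the port and model name are placeholders, use whatever your local server actually serves:

```sh
[MAIN]
is_local = True
provider_name = openai
provider_model = deepseek-r1:14b
provider_server_address = 127.0.0.1:8000
```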
Run the assistant:

```sh
python3 main.py
```
main.py (6 changed lines)
@@ -22,12 +22,10 @@ def handleInterrupt(signum, frame):
 def main():
     signal.signal(signal.SIGINT, handler=handleInterrupt)
 
-    if config.getboolean('MAIN', 'is_local'):
-        provider = Provider(config["MAIN"]["provider_name"], config["MAIN"]["provider_model"], config["MAIN"]["provider_server_address"])
-    else:
-        provider = Provider(provider_name=config["MAIN"]["provider_name"],
-                            model=config["MAIN"]["provider_model"],
-                            server_address=config["MAIN"]["provider_server_address"])
+    provider = Provider(provider_name=config["MAIN"]["provider_name"],
+                        model=config["MAIN"]["provider_model"],
+                        server_address=config["MAIN"]["provider_server_address"],
+                        is_local=config.getboolean('MAIN', 'is_local'))
 
     browser = Browser(create_driver(), headless=config.getboolean('MAIN', 'headless_browser'))
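main.py now reads `is_local` with `config.getboolean` and hands it straight to `Provider`. As a side note on what that call accepts, here is a tiny standalone check, a hypothetical snippet that is not part of the repository:

```python
import configparser

# Mimic the [MAIN] section of config.ini with the value the README tells users to set.
config = configparser.ConfigParser()
config.read_string("""
[MAIN]
is_local = True
""")

# getboolean understands True/False, yes/no, on/off and 1/0 (case-insensitive).
print(config.getboolean("MAIN", "is_local"))  # -> True
```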
@@ -15,9 +15,10 @@ import httpx
 from sources.utility import pretty_print, animate_thinking
 
 class Provider:
-    def __init__(self, provider_name, model, server_address = "127.0.0.1:5000"):
+    def __init__(self, provider_name, model, server_address = "127.0.0.1:5000", is_local=False):
         self.provider_name = provider_name.lower()
         self.model = model
+        self.is_local = is_local
         self.server = self.check_address_format(server_address)
         self.available_providers = {
             "ollama": self.ollama_fn,
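With the new keyword argument, a provider pointed at a local OpenAI-compatible endpoint can be constructed directly. A minimal sketch, assuming the class lives somewhere in the repository's `sources` package (the exact module path is not shown in this diff) and using placeholder values for the model and address:

```python
# Hypothetical import path; the diff only shows `class Provider` and the sources.utility import.
from sources.llm_provider import Provider

# is_local defaults to False, so existing callers keep the hosted-API behaviour.
local_provider = Provider(provider_name="openai",
                          model="deepseek-r1:14b",          # whatever your local server actually serves
                          server_address="127.0.0.1:8000",  # placeholder local endpoint
                          is_local=True)
```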
@@ -169,11 +170,16 @@ class Provider:
"""
|
||||
Use openai to generate text.
|
||||
"""
|
||||
if self.is_local:
|
||||
print("Using local OpenAI server")
|
||||
client = OpenAI(api_key=self.api_key, base_url=base_url)
|
||||
else:
|
||||
print("Using OpenAI API")
|
||||
client = OpenAI(api_key=self.api_key)
|
||||
try:
|
||||
response = client.chat.completions.create(
|
||||
model=self.model,
|
||||
messages=history
|
||||
messages=history,
|
||||
)
|
||||
thought = response.choices[0].message.content
|
||||
if verbose:
|
||||
|
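The local branch passes a `base_url` that is not defined in the lines shown here; presumably it is built from the configured server address elsewhere in the method. A purely illustrative sketch of one plausible way to derive it (the `/v1` suffix is what most OpenAI-compatible servers such as vLLM expose, but the repository may construct it differently):

```python
# Hypothetical helper, not from the diff: turn "127.0.0.1:8000" into an
# OpenAI-compatible base URL such as "http://127.0.0.1:8000/v1".
def build_base_url(server_address: str) -> str:
    if server_address.startswith(("http://", "https://")):
        return f"{server_address.rstrip('/')}/v1"
    return f"http://{server_address}/v1"
```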