Mirror of https://github.com/tcsenpai/agenticSeek.git (synced 2025-06-06 19:15:28 +00:00)
fix : server crash with ollama

Commit e0eee90202, parent d8ded2d456

@@ -3,8 +3,8 @@
 import argparse
 from flask import Flask, jsonify, request

-from sources.llamacpp import LlamacppLLM
-from sources.ollama import OllamaLLM
+from sources.llamacpp_handler import LlamacppLLM
+from sources.ollama_handler import OllamaLLM

 parser = argparse.ArgumentParser(description='AgenticSeek server script')
 parser.add_argument('--provider', type=str, help='LLM backend library to use. set to [ollama] or [llamacpp]', required=True)
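
The hunk above only renames the handler modules; for context, here is a minimal sketch of how those imports are presumably wired to the --provider flag. Only the imports and the two argparse lines come from the diff; the selection logic at the end is an assumption.

# Hypothetical wiring, not the repository's exact code.
import argparse
from sources.llamacpp_handler import LlamacppLLM
from sources.ollama_handler import OllamaLLM

parser = argparse.ArgumentParser(description='AgenticSeek server script')
parser.add_argument('--provider', type=str, help='LLM backend library to use. set to [ollama] or [llamacpp]', required=True)
args = parser.parse_args()

# Assumed mapping from the --provider flag to a generator backend.
generator = OllamaLLM() if args.provider == "ollama" else LlamacppLLM()
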
@@ -1,4 +1,5 @@
+import time
 from .generator import GeneratorLLM
 import ollama

@@ -31,17 +32,28 @@ class OllamaLLM(GeneratorLLM):
                 with self.state.lock:
                     self.state.current_buffer += content
-        except Exception as e:
-            if "404" in str(e):
+        except ollama.ResponseError as e:
+            if e.status_code == 404:
                 self.logger.info(f"Downloading {self.model}...")
                 ollama.pull(self.model)
+            with self.state.lock:
+                self.state.is_generating = False
+            print(f"Error: {e}")
+        except Exception as e:
             if "refused" in str(e).lower():
                 raise Exception("Ollama connection failed. is the server running ?") from e
+            raise e
         finally:
+            self.logger.info("Generation complete")
             with self.state.lock:
                 self.state.is_generating = False
+
+if __name__ == "__main__":
+    generator = OllamaLLM()
+    history = [
+        {
+            "role": "user",
+            "content": "Hello, how are you ?"
+        }
+    ]
+    generator.set_model("deepseek-r1:1.5b")
+    generator.start(history)
+    while True:
+        print(generator.get_status())
+        time.sleep(1)
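
The crash fix itself is the move from a blanket except Exception to catching ollama.ResponseError, checking its status_code, and pulling the missing model instead of letting the exception take the server down. Below is a self-contained sketch of that pattern against the ollama Python package; chat_with_autopull is a hypothetical helper name, not code from the repository.

# Sketch of the 404-autopull pattern, assuming a running local Ollama server
# and the ollama Python package installed.
import ollama

MODEL = "deepseek-r1:1.5b"  # same model as the commit's __main__ example

def chat_with_autopull(messages):
    try:
        return ollama.chat(model=MODEL, messages=messages)
    except ollama.ResponseError as e:
        # A 404 from the Ollama API means the model is not present locally:
        # pull it once and retry instead of crashing the caller.
        if e.status_code == 404:
            ollama.pull(MODEL)
            return ollama.chat(model=MODEL, messages=messages)
        raise
    except Exception as e:
        # "Connection refused" usually means the Ollama daemon is not running.
        if "refused" in str(e).lower():
            raise Exception("Ollama connection failed. is the server running ?") from e
        raise

if __name__ == "__main__":
    reply = chat_with_autopull([{"role": "user", "content": "Hello, how are you ?"}])
    print(reply["message"]["content"])
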
@@ -270,5 +270,6 @@ goodbye!
         return thought

 if __name__ == "__main__":
-    provider = Provider("openai", "gpt-4o-mini")
-    print(provider.respond(["user", "Hello, how are you?"]))
+    provider = Provider("server", "deepseek-r1:1.5b", "192.168.1.20:3333")
+    res = provider.respond(["user", "Hello, how are you?"])
+    print("Response:", res)
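
With this change, the provider module's quick test targets the self-hosted "server" backend rather than the OpenAI API. A hedged usage sketch, with a placeholder address standing in for the LAN IP in the diff:

# Usage sketch: point the Provider at wherever the AgenticSeek server script
# from the first hunk is listening (address below is a placeholder).
provider = Provider("server", "deepseek-r1:1.5b", "127.0.0.1:3333")
res = provider.respond(["user", "Hello, how are you?"])
print("Response:", res)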