fix: server crash with Ollama

This commit is contained in:
martin legrand 2025-03-29 18:07:38 +01:00
parent d8ded2d456
commit e0eee90202
4 changed files with 25 additions and 12 deletions

View File

@ -3,8 +3,8 @@
import argparse
from flask import Flask, jsonify, request
from sources.llamacpp import LlamacppLLM
from sources.ollama import OllamaLLM
from sources.llamacpp_handler import LlamacppLLM
from sources.ollama_handler import OllamaLLM
parser = argparse.ArgumentParser(description='AgenticSeek server script')
parser.add_argument('--provider', type=str, help='LLM backend library to use. set to [ollama] or [llamacpp]', required=True)

View File

@ -1,4 +1,5 @@
import time
from .generator import GeneratorLLM
import ollama
@ -31,17 +32,28 @@ class OllamaLLM(GeneratorLLM):
with self.state.lock:
self.state.current_buffer += content
except ollama.ResponseError as e:
if e.status_code == 404:
except Exception as e:
if "404" in str(e):
self.logger.info(f"Downloading {self.model}...")
ollama.pull(self.model)
with self.state.lock:
self.state.is_generating = False
print(f"Error: {e}")
except Exception as e:
if "refused" in str(e).lower():
raise Exception("Ollama connection failed. is the server running ?") from e
raise e
finally:
self.logger.info("Generation complete")
with self.state.lock:
self.state.is_generating = False
self.state.is_generating = False
# Manual smoke test: run this module directly to exercise OllamaLLM end to end.
# NOTE(review): indentation here reflects the diff rendering, which strips
# leading whitespace — in the real file these lines are nested normally.
if __name__ == "__main__":
generator = OllamaLLM()
# Minimal single-turn chat history passed to the generator.
history = [
{
"role": "user",
"content": "Hello, how are you ?"
}
]
generator.set_model("deepseek-r1:1.5b")
# start() presumably launches generation in the background — the loop below
# polls status, so it does not appear to block; TODO confirm against GeneratorLLM.
generator.start(history)
# Poll and print generation status once per second; loop never exits (Ctrl-C to stop).
while True:
print(generator.get_status())
time.sleep(1)

View File

@ -270,5 +270,6 @@ goodbye!
return thought
if __name__ == "__main__":
provider = Provider("openai", "gpt-4o-mini")
print(provider.respond(["user", "Hello, how are you?"]))
provider = Provider("server", "deepseek-r1:1.5b", "192.168.1.20:3333")
res = provider.respond(["user", "Hello, how are you?"])
print("Response:", res)