fix: server problem with llamacpp

martin legrand 2025-03-29 13:33:25 +01:00
parent 0c3a07f208
commit 58d52ad61f


@@ -12,7 +12,7 @@ class LlamacppLLM(GeneratorLLM):
         self.llm = None
 
     def generate(self, history):
-        if self.model is None:
+        if self.llm is None:
             self.llm = Llama.from_pretrained(
                 repo_id=self.model,
                 filename="*q8_0.gguf",
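The change swaps the lazy-initialization guard in `generate` from `self.model` to `self.llm`: `self.model` holds the repo id and is always set, so the old check never fired and the `Llama` instance was never created. A minimal sketch of the corrected pattern is below; the constructor shape and the final `create_chat_completion` call are illustrative assumptions, not taken from the repository's `GeneratorLLM` base class.

```python
from llama_cpp import Llama

class LlamacppLLM:
    def __init__(self, model: str):
        self.model = model  # Hugging Face repo id (illustrative constructor)
        self.llm = None     # heavy Llama instance, created on first use

    def generate(self, history):
        # Guard on self.llm: self.model is always set, so guarding on it
        # (as the old code did) meant the model was never loaded.
        if self.llm is None:
            self.llm = Llama.from_pretrained(
                repo_id=self.model,
                filename="*q8_0.gguf",  # glob: pick the q8_0 GGUF from the repo
            )
        # Hypothetical inference call; the diff hunk ends before this point.
        return self.llm.create_chat_completion(messages=history)
```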