fix: server problem with llamacpp

martin legrand 2025-03-29 13:32:46 +01:00
parent 44e0508ae5
commit 0c3a07f208

@@ -18,6 +18,7 @@ class LlamacppLLM(GeneratorLLM):
                 filename="*q8_0.gguf",
                 verbose=True
             )
+            return
         self.logger.info(f"Using {self.model} for generation with Llama.cpp")
         self.llm.create_chat_completion(
             messages = history
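
For context, here is a minimal sketch of the method this hunk patches, assuming llama-cpp-python's Llama.from_pretrained and create_chat_completion APIs. The GeneratorLLM stub, the generate signature, the lazy-load guard, and the example repo id are reconstructions from the hunk header and context lines, not the repository's actual code:

import logging

from llama_cpp import Llama  # pip install llama-cpp-python huggingface_hub


class GeneratorLLM:
    """Minimal stand-in for the repo's base class (assumed shape)."""
    def __init__(self):
        self.model = "Qwen/Qwen1.5-7B-Chat-GGUF"  # hypothetical repo id
        self.logger = logging.getLogger(__name__)
        self.llm = None


class LlamacppLLM(GeneratorLLM):
    def generate(self, history):
        if self.llm is None:
            # First call: fetch/load the GGUF weights, then return early
            # (the line this commit adds) so the server does not attempt
            # a completion before the model is ready.
            self.llm = Llama.from_pretrained(
                repo_id=self.model,
                filename="*q8_0.gguf",
                verbose=True,
            )
            return
        self.logger.info(f"Using {self.model} for generation with Llama.cpp")
        return self.llm.create_chat_completion(
            messages=history,  # e.g. [{"role": "user", "content": "Hello"}]
        )

Under this reading, the first request only warms the model and the caller retries once the weights are resident; that behavior is inferred from the added return, not stated in the commit message.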