fix: server cache

This commit is contained in:
martin legrand 2025-04-05 16:41:24 +02:00
parent 8991aaae8d
commit a667f89c12


@@ -13,8 +13,8 @@ class OllamaLLM(GeneratorLLM):
     def generate(self, history):
         self.logger.info(f"Using {self.model} for generation with Ollama")
-        if cache.is_cached(history[-1]['content']):
-            self.state.current_buffer = cache.get_cached_response(history[-1]['content'])
+        if self.cache.is_cached(history[-1]['content']):
+            self.state.current_buffer = self.cache.get_cached_response(history[-1]['content'])
             self.state.is_generating = False
             return
         try:
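
The fix replaces a bare reference to a module-level cache with the instance attribute self.cache. Below is a minimal sketch of how that attribute might be wired up, assuming a hypothetical Cache class exposing the two methods the diff calls (is_cached, get_cached_response); the storage details and the add method are illustrative, not taken from this repository.

class Cache:
    """Hypothetical prompt-response cache matching the calls in the diff."""

    def __init__(self):
        self._store = {}  # maps prompt text -> cached response

    def is_cached(self, prompt: str) -> bool:
        return prompt in self._store

    def get_cached_response(self, prompt: str) -> str:
        return self._store[prompt]

    def add(self, prompt: str, response: str) -> None:
        self._store[prompt] = response


class GeneratorLLM:
    def __init__(self):
        # Because the fixed code reads self.cache, the cache must be attached
        # to the instance here rather than imported as a module-level global.
        self.cache = Cache()

Attaching the cache to the instance keeps each generator's cache state self-contained and avoids the NameError (or shared-global coupling) that the original module-level lookup would cause.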