From 06ddc45955fe567138681a6ec4e6ba3d7abf84d9 Mon Sep 17 00:00:00 2001
From: martin legrand
Date: Sat, 5 Apr 2025 16:47:38 +0200
Subject: [PATCH] fix : server cache bug

---
 server/sources/ollama_handler.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/server/sources/ollama_handler.py b/server/sources/ollama_handler.py
index 8510f96..29d2e0d 100644
--- a/server/sources/ollama_handler.py
+++ b/server/sources/ollama_handler.py
@@ -15,10 +15,6 @@ class OllamaLLM(GeneratorLLM):
 
     def generate(self, history):
         self.logger.info(f"Using {self.model} for generation with Ollama")
-        if self.cache.is_cached(history[-1]['content']):
-            self.state.current_buffer = self.cache.get_cached_response(history[-1]['content'])
-            self.state.is_generating = False
-            return
         try:
             with self.state.lock:
                 self.state.is_generating = True
@@ -49,7 +45,6 @@ class OllamaLLM(GeneratorLLM):
         self.logger.info("Generation complete")
         with self.state.lock:
             self.state.is_generating = False
-        self.cache.add_message_pair(history[-1]['content'], self.state.current_buffer)
 
 if __name__ == "__main__":
     generator = OllamaLLM()
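
For reference, here is a minimal sketch of the caching flow that the deleted lines implemented, assuming an in-memory cache keyed on the latest user message. The Cache class and the example history below are hypothetical stand-ins built only from the method names visible in the diff (is_cached, get_cached_response, add_message_pair); they are not the project's actual implementation.

# Hypothetical stand-in for the cache object the removed lines called.
# Keys are the latest user message; values are previously generated responses.
class Cache:
    def __init__(self):
        self._store = {}

    def is_cached(self, prompt):
        return prompt in self._store

    def get_cached_response(self, prompt):
        return self._store[prompt]

    def add_message_pair(self, prompt, response):
        self._store[prompt] = response


if __name__ == "__main__":
    cache = Cache()
    history = [{"role": "user", "content": "hello"}]
    prompt = history[-1]["content"]
    if cache.is_cached(prompt):
        response = cache.get_cached_response(prompt)  # pre-patch fast path
    else:
        response = "hi there"                         # would come from the model
        cache.add_message_pair(prompt, response)
    print(response)

Note that keying only on history[-1]['content'] ignores earlier turns, so two different conversations ending with the same message would return the same cached response; whether that is the exact server cache bug this patch addresses is not stated in the commit message.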