mirror of https://github.com/tcsenpai/agenticSeek.git
synced 2025-07-23 18:00:16 +00:00
fix : server cache bug
parent 93c8f0f8e4
commit 06ddc45955
@@ -15,10 +15,6 @@ class OllamaLLM(GeneratorLLM):
 
     def generate(self, history):
         self.logger.info(f"Using {self.model} for generation with Ollama")
-        if self.cache.is_cached(history[-1]['content']):
-            self.state.current_buffer = self.cache.get_cached_response(history[-1]['content'])
-            self.state.is_generating = False
-            return
         try:
             with self.state.lock:
                 self.state.is_generating = True
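The four removed lines were a read-side fast path: if the latest user message had been seen before, generate() returned the cached response immediately instead of calling Ollama. The Cache class itself is not part of this diff, so the following is only a minimal sketch of the interface implied by its three call sites (is_cached, get_cached_response, add_message_pair); the real implementation may differ.

# Minimal sketch of the cache interface implied by the call sites in this
# diff. The real Cache class is not shown here, so treat this as an
# assumption, not the repository's implementation.
class Cache:
    def __init__(self):
        self._store = {}  # maps prompt text -> cached response

    def is_cached(self, prompt: str) -> bool:
        return prompt in self._store

    def get_cached_response(self, prompt: str) -> str:
        return self._store[prompt]

    def add_message_pair(self, prompt: str, response: str) -> None:
        self._store[prompt] = response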
@@ -49,7 +45,6 @@ class OllamaLLM(GeneratorLLM):
         self.logger.info("Generation complete")
         with self.state.lock:
             self.state.is_generating = False
-        self.cache.add_message_pair(history[-1]['content'], self.state.current_buffer)
 
 if __name__ == "__main__":
     generator = OllamaLLM()
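Taken together, the two hunks leave generate() neither reading nor writing the cache, so the commit fixes the "server cache bug" by disabling response caching entirely rather than repairing it. The commit message does not say what the bug was; one plausible hazard of the removed pattern is that it keyed the cache on history[-1]['content'] alone, so two different conversations ending in the same user message would collide on a single entry. A hypothetical fix for that particular hazard, which is not code from this repository, would key on the whole conversation:

import hashlib
import json

# Hypothetical cache key covering the full conversation rather than only
# the last message, so identical final messages in different contexts do
# not collide. Illustrative only; this is not the approach the commit takes.
def cache_key(history: list) -> str:
    payload = json.dumps(history, sort_keys=True)
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()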