fix: ollama server issue

martin legrand 2025-03-30 12:04:06 +02:00
parent e56e5a4b3d
commit 69f276955a


@@ -24,18 +24,14 @@ class OllamaLLM(GeneratorLLM):
                 messages=history,
                 stream=True,
             )
             for chunk in stream:
-                if type(chunk) != dict:
-                    self.logger.error(f"Error: chunk not a dict")
-                    continue
                 content = chunk['message']['content']
-                if '.' in content:
-                    self.logger.info(self.state.current_buffer)
-                    self.state.last_complete_sentence = self.state.current_buffer
+                if '\n' in content:
+                    self.logger.info(content)
                 with self.state.lock:
                     self.state.current_buffer += content
         except Exception as e:
             if "404" in str(e):
                 self.logger.info(f"Downloading {self.model}...")