diff --git a/ai_conversation.py b/ai_conversation.py
index 16b9cc5..89e7880 100644
--- a/ai_conversation.py
+++ b/ai_conversation.py
@@ -85,10 +85,10 @@ class AIConversation:
             # Post-process to remove repetition
             response_content = self.remove_repetition(response_content)
 
-            # Format and print the response
+            # Format and print the response with a bubble
             model_name = f"{self.current_model.upper()} ({name}):"
-            formatted_response = f"{model_name}\n{response_content}\n"
-            print(colored(formatted_response, color))
+            formatted_response = self.create_bubble(response_content, color, model_name)
+            print(formatted_response)
             conversation_log.append(
                 {"role": "assistant", "content": formatted_response}
             )
@@ -161,4 +161,21 @@ class AIConversation:
                 unique_sentences.append(sentence)
 
         # Join the sentences back together
-        return " ".join(unique_sentences)
\ No newline at end of file
+        return " ".join(unique_sentences)
+
+    def create_bubble(self, text, color, header):
+        # Split the text into lines
+        lines = text.split('\n')
+        # Find the maximum line length
+        max_length = max(len(line) for line in lines)
+
+        # Create the bubble
+        bubble = []
+        bubble.append(colored(f"╭{'─' * (max_length + 2)}╮", color))
+        bubble.append(colored(f"│ {header:<{max_length}} │", color))
+        bubble.append(colored(f"├{'─' * (max_length + 2)}┤", color))
+        for line in lines:
+            bubble.append(colored(f"│ {line:<{max_length}} │", color))
+        bubble.append(colored(f"╰{'─' * (max_length + 2)}╯", color))
+
+        return '\n'.join(bubble)
\ No newline at end of file
diff --git a/env.example b/env.example
index c06ff18..f2ebed4 100644
--- a/env.example
+++ b/env.example
@@ -1,4 +1,5 @@
 OLLAMA_ENDPOINT=http://127.0.0.1:11434
 MODEL_1=llama3.1:8b
 MODEL_2=mistral-nemo:latest
-INITIAL_PROMPT="Let's discuss the future of AI. What are your thoughts on its potential impact on society? Please tell me your name too. "
\ No newline at end of file
+INITIAL_PROMPT="Hi! How are you?"
+MAX_TOKENS=8000
\ No newline at end of file
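
For a quick preview of what the new helper renders, here is a minimal standalone sketch that mirrors the create_bubble logic from the patch. It assumes colored is termcolor.colored (the import is not shown in this diff), and preview_bubble plus the sample header and text are hypothetical names used only for illustration.

    # preview_bubble.py -- illustrative sketch only; mirrors the bubble layout added in ai_conversation.py
    from termcolor import colored  # assumed source of `colored`; not shown in the diff

    def preview_bubble(text, color, header):
        lines = text.split("\n")
        # Width is taken from the longest content line, as in the patch.
        # Note: a header longer than the widest content line will overflow the border.
        max_length = max(len(line) for line in lines)
        bubble = [colored(f"╭{'─' * (max_length + 2)}╮", color),
                  colored(f"│ {header:<{max_length}} │", color),
                  colored(f"├{'─' * (max_length + 2)}┤", color)]
        for line in lines:
            bubble.append(colored(f"│ {line:<{max_length}} │", color))
        bubble.append(colored(f"╰{'─' * (max_length + 2)}╯", color))
        return "\n".join(bubble)

    if __name__ == "__main__":
        sample = "Hello! I'm doing well, thanks for asking.\nHow can I help you today?"
        print(preview_bubble(sample, "cyan", "MODEL_1 (Alice):"))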