removed anything webui: let's keep things simple

tcsenpai 2024-10-06 23:15:24 +02:00
parent 648e03ae28
commit 7f5ea8c003
7 changed files with 35 additions and 302 deletions

README.md

@@ -1,6 +1,6 @@
# DualMind: AI Conversation Simulator
DualMind is an innovative AI conversation simulator that facilitates engaging dialogues between two AI models using the Ollama API. It offers both a command-line interface (CLI) and a Streamlit-based web interface for immersive and customizable AI interactions.
DualMind is an innovative AI conversation simulator that facilitates engaging dialogues between two AI models using the Ollama API. It offers a command-line interface (CLI) for immersive and customizable AI interactions.
![Screenshot](imgs/screenshot.png)
@@ -8,15 +8,13 @@ DualMind is an innovative AI conversation simulator that facilitates engaging di
- 🤖 Dual-model conversation: Engage two different AI models in a thought-provoking dialogue
- 🎭 Customizable system prompts: Tailor the behavior and personality of each AI model
- 🖥️ Multiple interface options:
- Command-line interface for quick interactions
- Streamlit web interface for a user-friendly experience
- 🖥️ Command-line interface for quick interactions
- 🛠️ Conversation customization:
- Adjust the number of exchanges
- Modify the initial prompt
- Select different AI models
- 💾 Save and download conversation logs
- 🎨 Responsive and visually appealing design
- 💾 Save conversation logs
- 🔢 Token count display and limit: Monitor the token usage for each message and limit the conversation to a specified number of tokens.
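
For reference, a token counter along these lines can be built with `tiktoken` (listed in `requirements.txt`); a minimal sketch, assuming the `cl100k_base` encoding rather than whatever encoding the project actually uses:

```python
import tiktoken

def count_tokens(text: str) -> int:
    # Hypothetical helper: the real counter may choose a different
    # encoding or account for per-message overhead differently.
    encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))

print(count_tokens("Let's discuss the future of AI."))
```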
## Prerequisite: Ollama
@@ -46,6 +44,7 @@ Please refer to [Ollama](https://ollama.com/download) to install Ollama on your
MODEL_1=llama2
MODEL_2=mistral
INITIAL_PROMPT="Let's discuss the future of AI. What are your thoughts on its potential impact on society?"
MAX_TOKENS=8000
```
Feel free to use the `env.example` file as a template.
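
As the `main.py` hunk below shows, these values are read at startup with `python-dotenv`; a minimal standalone sketch of the same pattern:

```python
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from .env into the environment

model_1 = os.getenv("MODEL_1", "llama2")
model_2 = os.getenv("MODEL_2", "mistral")
initial_prompt = os.getenv("INITIAL_PROMPT", "Hello!")
max_tokens = int(os.getenv("MAX_TOKENS", 4000))
```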
@@ -54,42 +53,29 @@ Please refer to [Ollama](https://ollama.com/download) to install Ollama on your
## Usage
### Command-line Interface
To run DualMind in CLI mode:
```sh
./run_cli.sh
```
### Streamlit Web Interface
To run DualMind in Streamlit mode:
```sh
./run_streamlit.sh
```
Then, open your web browser and navigate to the URL provided in the terminal (usually `http://localhost:8501`).
## Customization
### System Prompts
You can customize the system prompts for each AI model by editing the `system_prompt_1.txt` and `system_prompt_2.txt` files in the project root.
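
Each prompt file becomes the system message for its model; a minimal sketch of how a prompt might be loaded and attached, mirroring `load_system_prompt` in `main.py` (the actual message plumbing lives in `ai_conversation.py`):

```python
def load_system_prompt(filename: str) -> str:
    with open(filename, "r") as file:
        return file.read().strip()

# Hypothetical wiring: the real code keeps one system prompt per model.
messages = [
    {"role": "system", "content": load_system_prompt("system_prompt_1.txt")}
]
```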
### Styling
### Options
You can customize the options for the conversation by editing the `options.json` file in the project root.
The appearance of the Streamlit interface can be customized by modifying the `style/custom.css` file.
## Project Structure
- `main.py`: Entry point of the application
- `ai_conversation.py`: Core logic for AI conversations
- `streamlit_app.py`: Streamlit web interface implementation
- `style/custom.css`: Custom styles for the web interface
- `run_cli.sh`: Shell script to run the CLI version
- `run_streamlit.sh`: Shell script to run the Streamlit version
## Contributing
@@ -102,4 +88,3 @@ This project is open source and available under the [MIT License](LICENSE).
## Acknowledgements
- This project uses the [Ollama](https://ollama.ai/) API for AI model interactions.
- The web interface is built with [Streamlit](https://streamlit.io/).

ai_conversation.py

@@ -11,7 +11,7 @@ class AIConversation:
system_prompt_1,
system_prompt_2,
ollama_endpoint,
max_tokens=4000,
max_tokens=4000,
):
# Initialize conversation parameters and Ollama client
self.model_1 = model_1
@@ -41,7 +41,7 @@ class AIConversation:
break # Avoid removing the system message
return messages
def start_conversation(self, initial_message, num_exchanges=0):
def start_conversation(self, initial_message, num_exchanges=0, options=None):
# Main conversation loop
current_message = initial_message
color_1, color_2 = "cyan", "yellow"
@@ -78,10 +78,7 @@
response = self.client.chat(
model=self.current_model,
messages=messages,
options={
"temperature": 0.7, # Control randomness
"repeat_penalty": 1.2, # Penalize repetition
},
options=options,
)
response_content = response["message"]["content"]

main.py

@@ -1,5 +1,5 @@
import os
import argparse
import json
from dotenv import load_dotenv, set_key
from ai_conversation import AIConversation
@@ -8,6 +8,11 @@ def load_system_prompt(filename):
with open(filename, "r") as file:
return file.read().strip()
def load_options_from_json(filename):
"""Load options from a JSON file."""
with open(filename, "r") as file:
return json.load(file)
def main():
# Load environment variables
load_dotenv()
@@ -25,38 +30,24 @@ def main():
max_tokens = int(os.getenv("MAX_TOKENS", 4000))
print(f"Max tokens: {max_tokens}")
# Load options from JSON file
options = load_options_from_json("options.json")
print(f"Options: {options}")
# Initialize the AI conversation object
conversation = AIConversation(
model_1, model_2, system_prompt_1, system_prompt_2, ollama_endpoint, max_tokens
)
# Set up command-line argument parser
parser = argparse.ArgumentParser(description="AI Conversation")
parser.add_argument("--cli", action="store_true", help="Run in CLI mode")
parser.add_argument(
"--streamlit", action="store_true", help="Run in Streamlit mode"
)
args = parser.parse_args()
# Run the appropriate interface based on command-line arguments
if args.cli:
run_cli(conversation, initial_prompt)
elif args.streamlit:
run_streamlit(conversation, initial_prompt)
else:
print("Please specify either --cli or --streamlit mode.")
run_cli(conversation, initial_prompt, options)
def run_cli(conversation, initial_prompt):
def run_cli(conversation, initial_prompt, options):
"""Run the conversation in command-line interface mode."""
load_dotenv()
conversation.start_conversation(initial_prompt, num_exchanges=0)
conversation.start_conversation(initial_prompt, num_exchanges=0, options=options)
def run_streamlit(conversation, initial_prompt):
"""Run the conversation in Streamlit interface mode."""
import streamlit as st
from streamlit_app import streamlit_interface
streamlit_interface(conversation, initial_prompt)
if __name__ == "__main__":
main()

options.json (new file)

@@ -0,0 +1,8 @@
{
"temperature": 0.8,
"top_k": 40,
"top_p": 0.9,
"repeat_penalty": 1.1,
"presence_penalty": 0.5,
"frequency_penalty": 0.5
}
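
These are standard Ollama sampling parameters, and per the `ai_conversation.py` hunk above they are passed straight through to the client's `chat()` call; a minimal sketch, assuming the `ollama` Python package from `requirements.txt`:

```python
import json
import ollama

with open("options.json", "r") as f:
    options = json.load(f)

response = ollama.chat(
    model="llama2",
    messages=[{"role": "user", "content": "Hello!"}],
    options=options,  # temperature, top_k, top_p, penalties applied per request
)
print(response["message"]["content"])
```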

requirements.txt

@@ -1,6 +1,6 @@
python-dotenv
requests
termcolor
streamlit
Pillow
tiktoken
tiktoken
ollama

run_streamlit.sh (deleted)

@@ -1,2 +0,0 @@
#!/bin/bash
streamlit run main.py -- --streamlit

streamlit_app.py (deleted)

@@ -1,246 +0,0 @@
import streamlit as st
import os
import datetime
from dotenv import load_dotenv
# Function to load and apply custom CSS
def load_css(file_name):
with open(file_name, "r") as f:
st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
# Function to set page configuration
def set_page_config():
st.set_page_config(
page_title="DualMind",
page_icon="🤖",
layout="wide",
initial_sidebar_state="expanded",
)
def streamlit_interface(conversation, initial_prompt):
set_page_config()
load_css("style/custom.css")
st.markdown(
'<h1 class="main-title">🤖 DualMind</h1>', unsafe_allow_html=True
)
# Sidebar for customization
st.sidebar.title("🛠️ Settings")
# Load default values from .env
load_dotenv()
default_endpoint = os.getenv("OLLAMA_ENDPOINT")
default_model_1 = os.getenv("MODEL_1")
default_model_2 = os.getenv("MODEL_2")
# Sidebar for customization
ollama_endpoint = st.sidebar.text_input("Ollama Endpoint", value=default_endpoint)
# Update the OllamaClient endpoint
conversation.ollama_client.endpoint = ollama_endpoint
# Fetch available models
try:
available_models = conversation.ollama_client.get_available_models()
except Exception as e:
st.error(f"Error fetching models: {str(e)}")
available_models = []
# Model selection dropdowns
model_1 = st.sidebar.selectbox(
"Model 1",
options=available_models,
index=(
available_models.index(default_model_1)
if default_model_1 in available_models
else 0
),
)
model_2 = st.sidebar.selectbox(
"Model 2",
options=available_models,
index=(
available_models.index(default_model_2)
if default_model_2 in available_models
else 0
),
)
# System prompt customization
col1, col2 = st.columns(2)
with col1:
with st.expander("Customize System Prompt 1"):
system_prompt_1 = st.text_area(
"System Prompt 1", value=conversation.system_prompt_1, height=150
)
if st.button("Save Prompt 1"):
with open("system_prompt_1.txt", "w") as f:
f.write(system_prompt_1)
st.success("System Prompt 1 saved!")
with col2:
with st.expander("Customize System Prompt 2"):
system_prompt_2 = st.text_area(
"System Prompt 2", value=conversation.system_prompt_2, height=150
)
if st.button("Save Prompt 2"):
with open("system_prompt_2.txt", "w") as f:
f.write(system_prompt_2)
st.success("System Prompt 2 saved!")
# Update conversation with new settings
conversation.model_1 = model_1
conversation.model_2 = model_2
conversation.system_prompt_1 = system_prompt_1
conversation.system_prompt_2 = system_prompt_2
if "messages" not in st.session_state:
st.session_state.messages = []
if "current_message" not in st.session_state:
st.session_state.current_message = initial_prompt
# Add this new section for customizing the initial message
initial_message = st.text_area(
"Customize initial message:", value=st.session_state.current_message
)
if st.button("Set Initial Message"):
st.session_state.current_message = initial_message
st.success("Initial message updated!")
if "exchange_count" not in st.session_state:
st.session_state.exchange_count = 0
# Update the chat message display
for message in st.session_state.messages:
with st.chat_message(
message["role"], avatar="🧑" if message["role"] == "user" else "🤖"
):
st.markdown(
f'<div class="chat-message {"user-message" if message["role"] == "user" else "assistant-message"}">{message["content"]}</div>',
unsafe_allow_html=True,
)
num_exchanges = st.number_input(
"Number of exchanges", min_value=1, max_value=10, value=3
)
if st.button("Generate Responses"):
with st.spinner("Generating responses..."):
for _ in range(num_exchanges):
response = conversation.get_conversation_response(
st.session_state.current_message
)
model_name, content = response.split("\n", 1)
avatar = "🔵" if model_name == model_1 else "🟢"
st.session_state.messages.append(
{
"role": "assistant",
"content": f"**{model_name}**\n\n{content}",
"avatar": avatar,
}
)
st.session_state.current_message = content
st.session_state.exchange_count += 1
with st.chat_message("assistant", avatar=avatar):
st.markdown(f"**{model_name}**\n\n{content}")
if st.button("Reset Conversation"):
st.session_state.messages = []
st.session_state.current_message = (
initial_message # Use the customized initial message here
)
st.session_state.exchange_count = 0
conversation.current_model = conversation.model_1
conversation.current_system_prompt = conversation.system_prompt_1
st.write(f"Total exchanges: {st.session_state.exchange_count}")
user_input = st.text_input("Your message:", key="user_input")
if st.button("Send"):
if user_input:
st.session_state.messages.append(
{"role": "user", "content": user_input, "avatar": "🧑‍💻"}
)
st.session_state.current_message = user_input
with st.spinner("Processing your message..."):
st.experimental_rerun()
col1, col2, col3 = st.columns([1, 1, 1])
with col2:
if st.button(
"💾 Save Conversation",
key="save_button",
help="Save the current conversation",
use_container_width=True,
):
log_content = create_conversation_log(
conversation, st.session_state.messages
)
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"conversation_log_{timestamp}.txt"
with open(filename, "w") as f:
f.write(log_content)
st.success(f"Conversation saved as {filename}")
st.balloons() # Add a celebratory animation when saving
st.experimental_rerun() # Rerun the app to update the saved conversations list
# Add collapsible section for saved conversations
with st.sidebar.expander("📚 Saved Conversations"):
saved_conversations = get_saved_conversations()
if saved_conversations:
for conv_file in saved_conversations:
if st.button(f"📥 {conv_file}", key=f"download_{conv_file}"):
with open(conv_file, "r") as f:
content = f.read()
st.download_button(
label=f"📥 Download {conv_file}",
data=content,
file_name=conv_file,
mime="text/plain",
key=f"download_button_{conv_file}",
)
else:
st.info("No saved conversations found.")
# Add a footer
st.markdown(
"""
<footer>
<p>Made with ❤️ by <a href="https://github.com/tcsenpai">TCSenpai</a></p>
</footer>
""",
unsafe_allow_html=True,
)
def create_conversation_log(conversation, messages):
log = f"Conversation Log - {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
log += f"Ollama Endpoint: {conversation.ollama_client.endpoint}\n"
log += f"Model 1: {conversation.model_1}\n"
log += f"Model 2: {conversation.model_2}\n"
log += f"System Prompt 1:\n{conversation.system_prompt_1}\n\n"
log += f"System Prompt 2:\n{conversation.system_prompt_2}\n\n"
log += "Conversation:\n\n"
for message in messages:
log += f"{message['role'].capitalize()}: {message['content']}\n\n"
return log
def get_saved_conversations():
return [
f
for f in os.listdir()
if f.startswith("conversation_log_") and f.endswith(".txt")
]