import streamlit as st
from dotenv import load_dotenv

from api_handlers import OllamaHandler, PerplexityHandler, GroqHandler
from utils import generate_response, load_env_vars

# Load environment variables and resolve app configuration once at import time.
load_dotenv()
config = load_env_vars()


def setup_page():
    """Configure the Streamlit page chrome and render the intro blurb."""
    st.set_page_config(page_title="multi1 - Unified AI Reasoning Chains", page_icon="🧠", layout="wide")
    st.markdown("""
This app demonstrates AI reasoning chains using different backends: Ollama, Perplexity AI, and Groq. Choose a backend and enter your query to see the step-by-step reasoning process.
""", unsafe_allow_html=True)


def get_api_handler(backend):
    """Return the API handler instance for the selected backend.

    Args:
        backend: One of "Ollama", "Perplexity AI", or "Groq"; any
            unrecognized value falls through to the Groq handler.
    """
    if backend == "Ollama":
        return OllamaHandler(config['OLLAMA_URL'], config['OLLAMA_MODEL'])
    elif backend == "Perplexity AI":
        return PerplexityHandler(config['PERPLEXITY_API_KEY'], config['PERPLEXITY_MODEL'])
    else:  # Groq
        return GroqHandler()


def display_config(backend):
    """Show the active configuration for the chosen backend in the sidebar."""
    st.sidebar.markdown("## 🛠️ Current Configuration")
    if backend == "Ollama":
        st.sidebar.markdown(f"- 🖥️ Ollama URL: `{config['OLLAMA_URL']}`")
        st.sidebar.markdown(f"- 🤖 Ollama Model: `{config['OLLAMA_MODEL']}`")
    elif backend == "Perplexity AI":
        st.sidebar.markdown(f"- 🧠 Perplexity AI Model: `{config['PERPLEXITY_MODEL']}`")
    else:  # Groq
        st.sidebar.markdown("- ⚡ Using Groq API")


def main():
    """Entry point: set up the page, let the user pick a backend, and report timing."""
    setup_page()

    # Wire up the backend helpers defined above — the original body never used them,
    # leaving get_api_handler/display_config dead code.
    backend = st.sidebar.selectbox("Choose AI Backend", ("Ollama", "Perplexity AI", "Groq"))
    display_config(backend)
    api_handler = get_api_handler(backend)  # consumed by the reasoning loop (see NOTE)

    # NOTE(review): the query/response loop that drives `generate_response` and
    # accumulates `total_thinking_time` appears truncated in this chunk — restore
    # it from the full source. Until then, report zero elapsed time rather than
    # rendering a literal "{total_thinking_time:.2f}" placeholder: the original
    # string was missing its f-prefix and referenced an undefined variable.
    total_thinking_time = 0.0
    st.sidebar.markdown(f"⏱️ Total thinking time: {total_thinking_time:.2f} seconds", unsafe_allow_html=True)


if __name__ == "__main__":
    main()