import os

import streamlit as st
from dotenv import load_dotenv

from api_handlers import OllamaHandler, PerplexityHandler, GroqHandler
from config_menu import config_menu, display_config
from handlers.litellm_handler import LiteLLMHandler
from logger import logger
from utils import generate_response, litellm_config, litellm_instructions

# Load environment variables from .env file
load_dotenv()


def load_css():
    # Load custom CSS styles and inject them into the page
    with open(os.path.join(os.path.dirname(__file__), "..", "static", "styles.css")) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)


def setup_page():
    # Configure the Streamlit page
    st.set_page_config(page_title="multi1 - Unified AI Reasoning Chains", page_icon="🧠", layout="wide")
    load_css()

    # Display the main title and description (the HTML tags and class names
    # below are assumptions intended to match the custom stylesheet)
    st.markdown("""
    <h1 class="main-title">🧠 multi1 - Unified AI Reasoning Chains</h1>
    <p class="main-description">
        This app demonstrates AI reasoning chains using different backends:
        LiteLLM, Ollama, Perplexity AI, and Groq. Choose a backend and enter
        your query to see the step-by-step reasoning process.
    </p>
""", unsafe_allow_html=True) def get_api_handler(backend, config): if backend == "Ollama": return OllamaHandler(config['OLLAMA_URL'], config['OLLAMA_MODEL']) elif backend == "Perplexity AI": return PerplexityHandler(config['PERPLEXITY_API_KEY'], config['PERPLEXITY_MODEL']) elif backend == "Groq": return GroqHandler(config['GROQ_API_KEY'], config['GROQ_MODEL']) else: # LiteLLM litellm_config = st.session_state.get('litellm_config', {}) return LiteLLMHandler( litellm_config.get('model', ''), litellm_config.get('api_base', ''), litellm_config.get('api_key', '') ) def main(): logger.info("Starting the application") setup_page() # Set up the sidebar for configuration st.sidebar.markdown('⏱️ Total thinking time: {total_thinking_time:.2f} seconds
                # Show the total thinking time once it is reported (the
                # None-guard assumes intermediate yields carry no total yet)
                if total_thinking_time is not None:
                    time_container.markdown(
                        f'<p class="thinking-time">⏱️ Total thinking time: {total_thinking_time:.2f} seconds</p>',
                        unsafe_allow_html=True
                    )
                    logger.info(f"Total thinking time: {total_thinking_time:.2f} seconds")
        except Exception as e:
            # Handle and display any errors
            logger.error(f"Error generating response: {str(e)}", exc_info=True)
            st.error("An error occurred while generating the response. Please try again.")


if __name__ == "__main__":
    main()