"""Streamlit front-end for multi1 — unified AI reasoning chains.

NOTE(review): this file arrived whitespace-mangled and with several spans
stripped by a text-extraction pass (the <style> payload in load_css, the
HTML header in setup_page, and the middle of main() including the `try:`
that the surviving `except` belongs to). The lost spans are reconstructed
below and marked — confirm against the upstream project before shipping.
"""

from time import sleep  # kept: original file imported it; may be used by code not visible here
import os

import streamlit as st
from dotenv import load_dotenv

from api_handlers import LitellmHandler
from config_menu import config_menu
from logger import logger
from utils import generate_response

# Load environment variables (API keys, etc.) before anything reads them.
load_dotenv()


def load_css():
    """Inject the project stylesheet into the Streamlit page.

    The original line was `st.markdown(f'', unsafe_allow_html=True)` — the
    `<style>…</style>` wrapper around `f.read()` was lost in extraction, so
    the CSS file was opened but never applied. Restored here.
    """
    css_path = os.path.join(os.path.dirname(__file__), "..", "static", "styles.css")
    with open(css_path) as f:
        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)


def setup_page():
    """Configure page chrome, load CSS, and render the intro blurb."""
    st.set_page_config(
        page_title="multi1 - Unified AI Reasoning Chains",
        page_icon="🧠",
        layout="wide",
    )
    load_css()
    # NOTE(review): the original markdown almost certainly carried an HTML
    # header that the extraction stripped; only the plain blurb survived.
    st.markdown("""
This app demonstrates AI reasoning chains using different backends: Ollama, Perplexity AI, and Groq. Choose a backend and enter your query to see the step-by-step reasoning process.
""", unsafe_allow_html=True)


def get_api_handler(model, api_key):
    """Return a LiteLLM-backed handler for the chosen model/API key."""
    return LitellmHandler(model=model, api_key=api_key)


def main():
    """Drive the app: sidebar settings, query input, streamed reasoning steps.

    NOTE(review): the extraction dropped everything between the sidebar
    markdown and the thinking-time display, including the `try:` the
    surviving `except Exception` belonged to. The flow below is
    reconstructed from the surviving fragments (config_menu,
    generate_response, get_api_handler, the thinking-time f-string, the
    logger calls, and the error handler) — verify against upstream.
    """
    logger.info("Starting the application")
    setup_page()

    st.sidebar.markdown('<h3>⚙️ Settings</h3>', unsafe_allow_html=True)
    config = config_menu()

    user_query = st.text_input("💬 Enter your query:")
    if not user_query:
        # Nothing to do until the user types a query.
        return

    logger.info(f"Received user query: {user_query}")
    response_container = st.empty()
    time_container = st.empty()

    try:
        api_handler = get_api_handler(config["model"], config["api_key"])
        # Assumes generate_response yields (steps, total_thinking_time)
        # tuples, with total_thinking_time None until the chain finishes —
        # TODO confirm against utils.generate_response.
        for steps, total_thinking_time in generate_response(user_query, api_handler):
            with response_container.container():
                for title, content, _ in steps:
                    with st.expander(title, expanded=True):
                        st.markdown(content, unsafe_allow_html=True)

            if total_thinking_time is not None:
                time_container.markdown(
                    f'⏱️ Total thinking time: {total_thinking_time:.2f} seconds',
                    unsafe_allow_html=True,
                )
                logger.info(f"Total thinking time: {total_thinking_time:.2f} seconds")
    except Exception as e:
        # Top-level boundary: log with traceback, show a friendly message.
        logger.error(f"Error generating response: {str(e)}", exc_info=True)
        st.error("An error occurred while generating the response. Please try again.")


if __name__ == "__main__":
    main()