mirror of
https://github.com/tcsenpai/multi1.git
synced 2025-06-08 03:55:22 +00:00
configs on the sidebar
This commit is contained in:
parent
5275985873
commit
f4de5beac9
@ -1,16 +1,24 @@
|
|||||||
import streamlit as st
|
import streamlit as st
|
||||||
import os
|
import os
|
||||||
from dotenv import load_dotenv, set_key
|
from dotenv import load_dotenv, set_key
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
@dataclass
class StInputs:
    """Settings collected from the Streamlit sidebar configuration form.

    Bundles the model selection, credentials, and sampling/run limits
    that ``config_menu`` gathers so callers receive one typed object
    instead of a loose dict.
    """

    # LiteLLM-style model identifier, e.g. "openai/gpt-3.5-turbo".
    model: str
    # API key for the selected model provider (may be empty).
    api_key: str
    # Sampling temperature passed to the model.
    temperature: float
    # Per-request timeout in seconds.
    timeout: float
    # Pause between requests, in seconds, to avoid rate-limit errors.
    sleeptime: float
    # Upper bound on the number of reasoning steps.
    max_steps: int
    # Token budget per model response.
    max_tokens: int
def load_env_vars():
    """Load the project ``.env`` file and return the model settings.

    Reads the ``.env`` one directory above this file into the process
    environment, then returns a dict with the model identifier and its
    API key, falling back to defaults when the variables are unset.

    NOTE(review): ``display_config`` still reads OLLAMA_*/PERPLEXITY_*/
    GROQ_* keys that this dict no longer provides — verify callers.
    """
    env_path = os.path.join(os.path.dirname(__file__), "..", ".env")
    load_dotenv(env_path)
    return {
        "MODEL": os.getenv("MODEL", "gemini/gemini-1.5-pro"),
        "MODEL_API_KEY": os.getenv("MODEL_API_KEY", ""),
    }
def save_env_vars(config):
|
def save_env_vars(config):
|
||||||
@ -20,30 +28,31 @@ def save_env_vars(config):
|
|||||||
|
|
||||||
def config_menu():
    """Render the sidebar configuration form and return the chosen settings.

    Shows the current ``.env`` values inside an expander, lets the user
    edit the model, API key, and run limits, optionally persists the
    model/API-key pair back to ``.env`` via ``save_env_vars``, and
    returns everything as an ``StInputs`` instance.

    Returns:
        StInputs: the values currently entered in the sidebar widgets.
    """
    st.sidebar.markdown("## 🛠️ Configuration")

    config = load_env_vars()

    with st.sidebar.expander("Edit Configuration"):
        new_config = {}
        # Model + key are both stored for saving and captured for StInputs.
        model = new_config['MODEL'] = st.text_input(
            "Model (in Litellm style, provider/llm-name)",
            value=config['MODEL'], placeholder='openai/gpt-3.5-turbo')
        api_key = new_config['MODEL_API_KEY'] = st.text_input(
            "Model API Key", value=config['MODEL_API_KEY'],
            placeholder='api key here')
        # Fixed label typo ("Timout" -> "Timeout").
        timeout = st.number_input("Timeout", value=30.0, min_value=1.0,
                                  max_value=60.0, step=1.0)
        max_tokens = st.number_input("Max Tokens", value=512, min_value=300,
                                     max_value=2048, step=100)
        temperature = st.number_input("Temperature", value=0.1, min_value=0.0,
                                      max_value=2.0, step=0.1)
        max_steps = st.number_input("Max Steps (number of reasoning steps)",
                                    value=20, min_value=1, max_value=20, step=1)
        sleeptime = st.number_input(
            "Sleeptime between request hits(to avoid too many requests error)",
            value=1.0, min_value=0.0, max_value=30.0, step=1.0)

        if st.button("Save Configuration"):
            save_env_vars(new_config)
            st.success("Configuration saved successfully!")

    inputs = StInputs(
        model=model,
        api_key=api_key,
        timeout=timeout,
        max_tokens=max_tokens,
        temperature=temperature,
        max_steps=max_steps,
        sleeptime=sleeptime,
    )

    return inputs
def display_config(backend, config):
    """Show the active backend's settings in the sidebar.

    Args:
        backend: one of "Ollama", "Perplexity AI", or anything else
            (treated as Groq).
        config: mapping with the backend-specific keys read below.
            NOTE(review): ``load_env_vars`` no longer returns these
            OLLAMA_*/PERPLEXITY_*/GROQ_* keys — confirm the caller's
            config source.
    """
    sidebar = st.sidebar
    sidebar.markdown("## 🛠️ Current Configuration")
    if backend == "Ollama":
        sidebar.markdown(f"- 🖥️ Ollama URL: `{config['OLLAMA_URL']}`")
        sidebar.markdown(f"- 🤖 Ollama Model: `{config['OLLAMA_MODEL']}`")
    elif backend == "Perplexity AI":
        sidebar.markdown(f"- 🧠 Perplexity AI Model: `{config['PERPLEXITY_MODEL']}`")
    else: # Groq
        sidebar.markdown(f"- ⚡ Groq Model: `{config['GROQ_MODEL']}`")
|
Loading…
x
Reference in New Issue
Block a user