#LLM_TYPE will take 'openai' or 'local'. 'local' will use Ollama
LLM_TYPE = 'openai'
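#For example, to run fully local through Ollama instead of OpenAI, this would presumably become:
#LLM_TYPE = 'local'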
#-----OpenAI variables
OPENAI_API_KEY = ''
OPENAI_MODEL = 'gpt-4o-mini'
#-----Ollama variables
#OLLAMA_MODEL will take any model you can load in ollama
OLLAMA_MODEL = 'gemma2'
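#For example, any other model tag already pulled into Ollama should work here, e.g.:
#OLLAMA_MODEL = 'llama3'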
OLLAMA_URL = 'http://localhost:11434'
#-----Customization Variables
#CHARACTER will take any character prompt you have in the modules/prompts.py file. 'max' or 'none' are the default options
CHARACTER = 'max'
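#For example, to skip the character persona and use the plain 'none' prompt from modules/prompts.py:
#CHARACTER = 'none'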
#LISTEN_MODEL will take 'whisper' or 'google'. Whisper is the best option but requires additional setup with Nvidia drivers
LISTEN_MODEL = 'google'
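#For example, once the Nvidia/Whisper setup is in place:
#LISTEN_MODEL = 'whisper'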
TIME_LISTEN = '10'
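#TIME_LISTEN appears to set the length of the listening window, most likely in seconds; note the value is quoted as a string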
#STREAM_SPEAK_URL is set to the default URL for Alltalk. If you don't have Alltalk you can ignore this; if you want to use a different service, simply replace the URL
STREAM_SPEAK_URL = 'http://127.0.0.1:7851/api/tts-generate'
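#For example, if Alltalk runs on another machine, only the host would presumably change (192.168.1.50 is a placeholder address):
#STREAM_SPEAK_URL = 'http://192.168.1.50:7851/api/tts-generate'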
#-----Spotify variables. If you want Max to be able to control Spotify, this is where you will put your credentials
SPOTIFY_CLIENT_ID = ''
SPOTIFY_CLIENT_SECRET = ''
SPOTIFY_REDIRECT_URI = 'http://localhost:8888/callback'
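#Note: the client ID and secret come from the Spotify Developer Dashboard, and the redirect URI above generally has to match one registered there for your app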