Merge pull request #1 from fengwang/main

Encourage self-hosted Ollama to generate valid JSON responses
Commit 3e00dfc89a, authored by TheCookingSenpai on 2024-09-17 09:03:56 +02:00 and committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
4 changed files with 33 additions and 24 deletions
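For context, the failure mode this addresses: without a JSON constraint, a self-hosted model often wraps the requested object in prose, and the `json.loads` call downstream fails. A minimal illustration of that case (the reply string is invented, not taken from the repository):

```python
import json

# Invented example of a typical reply from a local model when no JSON
# constraint is applied: the object is there, but it is wrapped in prose.
chatty_reply = 'Sure! Here is the JSON you asked for: {"title": "Identifying Key Information"}'

try:
    json.loads(chatty_reply)
except json.JSONDecodeError:
    print("reply is not valid JSON")  # the case this commit tries to eliminate
```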


@@ -57,6 +57,11 @@ Result:
![0.9 or 0.11 example](examples/math.png)
+Prompt: In the context of Lie Group and Lie Algebra, let $R \in E$ be an irreducible root system. Show that then $E$ is an irreducible representation of the Weyl group $W$.
+![](examples/lie.1.png)
### Quickstart
To use the launcher, follow these instructions:


@@ -1,7 +1,7 @@
GROQ_API_KEY=gsk...
OLLAMA_URL=http://localhost:11434
-OLLAMA_MODEL=llama2
+OLLAMA_MODEL=llama3.1:70b
PERPLEXITY_API_KEY=your_perplexity_api_key
PERPLEXITY_MODEL=llama-3.1-sonar-small-128k-online
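These variables are read in ol1.py through python-dotenv, with the same fallback defaults that appear in the hunk further down; a minimal sketch of that pattern, not a verbatim excerpt:

```python
import os
from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # pulls the values from the .env file into the process environment

# Defaults mirror the ones used in ol1.py below.
OLLAMA_URL = os.getenv('OLLAMA_URL', 'http://localhost:11434')
OLLAMA_MODEL = os.getenv('OLLAMA_MODEL', 'llama3.1:70b')
```

Note that the configured model still has to be available on the Ollama host (for example pulled beforehand with the `ollama pull` command); the env file only tells ol1.py which model to request.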

examples/lie.1.png: new binary file (344 KiB), not shown

ol1.py

@@ -10,7 +10,7 @@ load_dotenv()
# Get configuration from .env file
OLLAMA_URL = os.getenv('OLLAMA_URL', 'http://localhost:11434')
-OLLAMA_MODEL = os.getenv('OLLAMA_MODEL', 'llama2')
+OLLAMA_MODEL = os.getenv('OLLAMA_MODEL', 'llama3.1:70b')
def make_api_call(messages, max_tokens, is_final_answer=False):
for attempt in range(3):
@@ -21,6 +21,7 @@ def make_api_call(messages, max_tokens, is_final_answer=False):
"model": OLLAMA_MODEL,
"messages": messages,
"stream": False,
"format": "json", # important, or most of the time ollama does not generate valid json response
"options": {
"num_predict": max_tokens,
"temperature": 0.2
@@ -38,7 +39,7 @@ def make_api_call(messages, max_tokens, is_final_answer=False):
time.sleep(1) # Wait for 1 second before retrying
def generate_response(prompt):
-messages = [
+messages = [  # two sentences added to the system prompt below to encourage a JSON-formatted response
{"role": "system", "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.
Example of a valid JSON response:
@ -47,7 +48,8 @@ Example of a valid JSON response:
"title": "Identifying Key Information",
"content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
"next_action": "continue"
-}```
+}```.
+You MUST respond using the expected JSON schema, and your response must be valid JSON. This JSON response is essential for our job.
"""},
{"role": "user", "content": prompt},
{"role": "assistant", "content": "Thank you! I will now think step by step following my instructions, starting at the beginning after decomposing the problem."}
@@ -121,7 +123,9 @@ def main():
for i, (title, content, thinking_time) in enumerate(steps):
if title.startswith("Final Answer"):
st.markdown(f"### {title}")
-st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
+# this will not work if the content contains code
+#st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
+st.markdown(content, unsafe_allow_html=True)
else:
with st.expander(title, expanded=True):
st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
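The reason for the change above: replacing every newline with `<br>` also rewrites the newlines inside fenced code blocks, so Markdown no longer recognizes the fences and any code in the final answer renders as flattened plain text. A small illustration of the two behaviours (assumes a running Streamlit app; the sample content string is invented):

```python
import streamlit as st

# Invented final-answer content that contains a fenced code block.
content = "Here is the function:\n\n```python\nprint('hi')\n```\n\nDone."

# Old behaviour: the fences end up on one line and the code block is lost.
st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)

# New behaviour: Markdown handles the newlines itself, so the code block survives.
st.markdown(content, unsafe_allow_html=True)
```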