enhance: encourage ollama to generate json response

Author: Feng
Date:   2024-09-16 20:43:04 +00:00
Parent: c2c4213f0f
Commit: 1e21cd520f

4 changed files with 37 additions and 24 deletions
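The key change is Ollama's "format" request field: setting it to "json" constrains decoding so the model's output is valid JSON. A minimal standalone sketch of the request pattern this commit adopts (endpoint, fields, and options are taken from the ol1.py diff below; the prompt itself is illustrative):

    import json
    import requests

    OLLAMA_URL = "http://localhost:11434"

    payload = {
        "model": "llama3.1:70b",
        "messages": [{"role": "user", "content": "Answer as a JSON object with an 'answer' key: what is 2 + 2?"}],
        "stream": False,
        "format": "json",  # constrain decoding to valid JSON
        "options": {"num_predict": 300, "temperature": 0.2},
    }

    response = requests.post(f"{OLLAMA_URL}/api/chat", json=payload)
    response.raise_for_status()
    print(json.loads(response.json()["message"]["content"]))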


@@ -57,6 +57,11 @@ Result:
 ![0.9 or 0.11 example](examples/math.png)
+Prompt: In the context of Lie Group and Lie Algebra, let $R \in E$ be an irreducible root system. Show that then $E$ is an irreducible representation of the Weyl group $W$.
+![](examples/lie.1.png)
 ### Quickstart
 To use the launcher, follow these instructions:


@ -1,7 +1,7 @@
GROQ_API_KEY=gsk... GROQ_API_KEY=gsk...
OLLAMA_URL=http://localhost:11434 OLLAMA_URL=http://localhost:11434
OLLAMA_MODEL=llama2 OLLAMA_MODEL=llama3.1:70b
PERPLEXITY_API_KEY=your_perplexity_api_key PERPLEXITY_API_KEY=your_perplexity_api_key
PERPLEXITY_MODEL=llama-3.1-sonar-small-128k-online PERPLEXITY_MODEL=llama-3.1-sonar-small-128k-online
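Note that the new default model must already be available to the local Ollama server; if it is not, pull it first with `ollama pull llama3.1:70b` (a sizeable download, so overriding OLLAMA_MODEL with a smaller tag such as `llama3.1:8b` also works).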

examples/lie.1.png: new binary file, 344 KiB (content not shown)

ol1.py

@@ -10,23 +10,26 @@ load_dotenv()
 # Get configuration from .env file
 OLLAMA_URL = os.getenv('OLLAMA_URL', 'http://localhost:11434')
-OLLAMA_MODEL = os.getenv('OLLAMA_MODEL', 'llama2')
+OLLAMA_MODEL = os.getenv('OLLAMA_MODEL', 'llama3.1:70b')
 def make_api_call(messages, max_tokens, is_final_answer=False):
     for attempt in range(3):
         try:
+            print(f'Try api call with {messages=}, {max_tokens=}, {is_final_answer=}', flush=True)
             response = requests.post(
                 f"{OLLAMA_URL}/api/chat",
                 json={
                     "model": OLLAMA_MODEL,
                     "messages": messages,
                     "stream": False,
+                    "format": "json",  # important: without this, Ollama often fails to return valid JSON
                     "options": {
                         "num_predict": max_tokens,
                         "temperature": 0.2
                     }
                 }
             )
+            print(f'Got response:\n{response.json()}\n', flush=True)
             response.raise_for_status()
             return json.loads(response.json()["message"]["content"])
         except Exception as e:
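Even with "format": "json", json.loads can still raise if the model wanders off-schema, which is why make_api_call wraps the request in a retry loop. The except branch is elided from this diff, so the following is a hedged sketch of the overall pattern rather than the file's exact code (call_with_retries and do_request are hypothetical names):

    import json
    import time

    def call_with_retries(do_request, attempts=3):
        # Hypothetical helper mirroring the retry pattern visible in make_api_call:
        # issue the request, and on any failure sleep briefly and try again.
        for attempt in range(attempts):
            try:
                raw = do_request()           # returns the model's message content (a str)
                return json.loads(raw)       # raises if the output is not valid JSON
            except Exception:
                if attempt == attempts - 1:  # out of retries: surface the error
                    raise
                time.sleep(1)                # wait 1 second before retrying, as in the diff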
@@ -38,7 +41,7 @@ def make_api_call(messages, max_tokens, is_final_answer=False):
             time.sleep(1)  # Wait for 1 second before retrying
 def generate_response(prompt):
-    messages = [
+    messages = [  # two sentences added to the system prompt below to encourage a JSON-format response
         {"role": "system", "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.
 Example of a valid JSON response:
@@ -47,7 +50,8 @@ Example of a valid JSON response:
     "title": "Identifying Key Information",
     "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
     "next_action": "continue"
-}```
+}```.
+You MUST respond using the expected JSON schema, and your response must be valid JSON. This JSON response is essential for our job.
 """},
         {"role": "user", "content": prompt},
         {"role": "assistant", "content": "Thank you! I will now think step by step following my instructions, starting at the beginning after decomposing the problem."}
@@ -68,6 +72,8 @@ Example of a valid JSON response:
         messages.append({"role": "assistant", "content": json.dumps(step_data)})
+        print(f"Generate response update: {messages=}", flush=True)
         if step_data['next_action'] == 'final_answer':
             break
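For context, the loop this hunk sits in feeds each parsed step back into the conversation and stops once the model signals completion. A hedged sketch of that control flow, reconstructed from the visible lines (run_steps and get_next_step are hypothetical stand-ins; the real function also tracks timing and token budgets):

    import json

    def run_steps(messages, get_next_step, max_steps=25):
        # Repeatedly ask for the next reasoning step until the model
        # sets next_action to 'final_answer' (or we hit the step cap).
        steps = []
        for _ in range(max_steps):
            step_data = get_next_step(messages)  # parsed JSON: title/content/next_action
            steps.append((step_data['title'], step_data['content']))
            messages.append({"role": "assistant", "content": json.dumps(step_data)})
            if step_data['next_action'] == 'final_answer':
                break
        return steps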
@@ -121,7 +127,9 @@ def main():
     for i, (title, content, thinking_time) in enumerate(steps):
         if title.startswith("Final Answer"):
             st.markdown(f"### {title}")
-            st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
+            # this would not render correctly if the content contains code blocks
+            #st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
+            st.markdown(content, unsafe_allow_html=True)
         else:
             with st.expander(title, expanded=True):
                 st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
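The rendering fix in the last hunk matters because replacing every newline with <br> destroys fenced code blocks in the model's markdown. An illustrative snippet (not from the repo) showing the difference; the app itself is launched with `streamlit run ol1.py`:

    import streamlit as st

    content = "Here is code:\n```python\nprint('hi')\n```"
    # Replacing newlines with <br> flattens the fenced block into one garbled line:
    # st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
    st.markdown(content, unsafe_allow_html=True)  # the fence renders as a code block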