Mirror of https://github.com/tcsenpai/multi1.git (synced 2025-06-06 19:15:23 +00:00)
Commit d838d55334 (parent 35ae853d7c): removed multiple files and unified + modularized
README.md (46 changed lines)

~~~diff
@@ -7,6 +7,7 @@
 - [x] Using Llama-3.1 70b on Groq to create o1-like reasoning chains
 - [x] Using Ollama to create o1-like reasoning chains
 - [x] Using Perplexity to create o1-like reasoning chains
+- [x] Using a unified interface to try out different providers
 
 ## Work in progress
 
@@ -82,55 +83,14 @@ To use the launcher, follow these instructions:
 
 3. Edit the .env file with your API keys / models preferences.
 
-4. Run the launcher:
+4. Run the main interface:
 
 ```
-python launcher.py
+streamlit run main.py
 ```
 
-5. Use the arrow keys to navigate the menu, Enter to select an option, and 'q' to quit.
-
-The launcher allows you to:
-
-- Run the Ollama-based chat application (ol1.py)
-- Run the Perplexity-based chat application (p1.py)
-- Run the Groq-based chat application (g1.py)
-- Edit the .env file
-- Exit the launcher
-
-When running a chat application, you can press 'q' at any time to stop the application and return to the launcher.
-
----
-
-Alternatively, if you prefer to run the applications directly without the launcher:
-
-```
-streamlit run app.py
-```
-
-Where 'app.py' is the app you want to run and can be:
-
-- g1.py (Groq)
-- ol1.py (Ollama)
-- p1.py (Perplexity)
-
----
-
-If you prefer to use the Gradio UI, follow these additional instructions (only works with Groq at the moment):
-
-~~~
-cd gradio
-~~~
-
-~~~
-pip3 install -r requirements.txt
-~~~
-
-~~~
-python3 app.py
-~~~
 
 ### Prompting Strategy
 
 The prompt is as follows:
~~~
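The .env file mentioned in step 3 is not part of this commit. As a rough sketch only, the variable names read by load_env_vars() in utils.py (further down) suggest contents like the following; all values here are placeholders, and the GROQ_API_KEY line is an assumption based on the Groq client normally reading its key from the environment:

~~~
# Placeholder values; adjust to your setup
OLLAMA_URL=http://localhost:11434
OLLAMA_MODEL=llama3.1:70b
PERPLEXITY_API_KEY=pplx-xxxxxxxx
PERPLEXITY_MODEL=llama-3.1-sonar-small-128k-online
# Assumption: the Groq client reads its key from the environment
GROQ_API_KEY=gsk_xxxxxxxx
~~~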
__pycache__/api_handlers.cpython-310.pyc (new binary file, not shown)
__pycache__/utils.cpython-310.pyc (new binary file, not shown)
api_handlers.py (new file, 124 lines)

~~~python
import json
import requests
import groq
import time


class OllamaHandler:
    def __init__(self, url, model):
        self.url = url
        self.model = model

    def make_api_call(self, messages, max_tokens, is_final_answer=False):
        for attempt in range(3):
            try:
                response = requests.post(
                    f"{self.url}/api/chat",
                    json={
                        "model": self.model,
                        "messages": messages,
                        "stream": False,
                        "format": "json",
                        "options": {
                            "num_predict": max_tokens,
                            "temperature": 0.2
                        }
                    }
                )
                response.raise_for_status()
                return json.loads(response.json()["message"]["content"])
            except Exception as e:
                if attempt == 2:
                    return self._error_response(str(e), is_final_answer)
                time.sleep(1)

    def _error_response(self, error_msg, is_final_answer):
        if is_final_answer:
            return {"title": "Error", "content": f"Failed to generate final answer after 3 attempts. Error: {error_msg}"}
        else:
            return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {error_msg}", "next_action": "final_answer"}


class PerplexityHandler:
    def __init__(self, api_key, model):
        self.api_key = api_key
        self.model = model

    def make_api_call(self, messages, max_tokens, is_final_answer=False):
        # Quick dirty fix for API calls in perplexity that removes the assistant message
        #messages[0]["content"] = messages[0]["content"] + " You will always respond ONLY with JSON with the following format: {'title': 'Title of the step', 'content': 'Content of the step', 'next_action': 'continue' or 'final_answer'}. You are not allowed to respond with anything else or any additional text. "
        if not is_final_answer:
            for i in range(len(messages)):
                if messages[i]["role"] == "assistant":
                    messages.pop(i)

        for attempt in range(3):
            try:
                url = "https://api.perplexity.ai/chat/completions"
                payload = {"model": self.model, "messages": messages}
                headers = {
                    "Authorization": f"Bearer {self.api_key}",
                    "Content-Type": "application/json",
                }
                response = requests.post(url, json=payload, headers=headers)

                # Add specific handling for 400 error
                if response.status_code == 400:
                    error_content = response.json()
                    print(f"HTTP 400 Error: {error_content}")
                    return self._error_response(f"HTTP 400 Error: {error_content}", is_final_answer)

                response.raise_for_status()
                content = response.json()["choices"][0]["message"]["content"]
                print("Content: ", content)
                return json.loads(content)
            except json.JSONDecodeError:
                print("Warning: content is not a valid JSON, returning raw response")
                # Better detection of final answer in the raw response for Perplexity
                forced_final_answer = False
                if '"next_action": "final_answer"' in content.lower().strip():
                    forced_final_answer = True
                print("Forced final answer: ", forced_final_answer)

                return {
                    "title": "Raw Response",
                    "content": content,
                    "next_action": "final_answer" if (is_final_answer | forced_final_answer) else "continue"
                }
            except requests.exceptions.RequestException as e:
                print(f"Request failed: {e}")
                if attempt == 2:
                    return self._error_response(str(e), is_final_answer)
                time.sleep(1)

    def _error_response(self, error_msg, is_final_answer):
        return {
            "title": "Error",
            "content": f"API request failed after 3 attempts. Error: {error_msg}",
            "next_action": "final_answer",
        }


class GroqHandler:
    def __init__(self):
        self.client = groq.Groq()

    def make_api_call(self, messages, max_tokens, is_final_answer=False):
        for attempt in range(3):
            try:
                response = self.client.chat.completions.create(
                    model="llama-3.1-70b-versatile",
                    messages=messages,
                    max_tokens=max_tokens,
                    temperature=0.2,
                    response_format={"type": "json_object"}
                )
                return json.loads(response.choices[0].message.content)
            except Exception as e:
                if attempt == 2:
                    return self._error_response(str(e), is_final_answer)
                time.sleep(1)

    def _error_response(self, error_msg, is_final_answer):
        if is_final_answer:
            return {"title": "Error", "content": f"Failed to generate final answer after 3 attempts. Error: {error_msg}"}
        else:
            return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {error_msg}", "next_action": "final_answer"}
~~~
g1.py (deleted, 117 lines)

~~~python
import streamlit as st
import groq
import os
import json
import time

client = groq.Groq()

def make_api_call(messages, max_tokens, is_final_answer=False):
    for attempt in range(3):
        try:
            response = client.chat.completions.create(
                model="llama-3.1-70b-versatile",
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.2,
                response_format={"type": "json_object"}
            )
            return json.loads(response.choices[0].message.content)
        except Exception as e:
            if attempt == 2:
                if is_final_answer:
                    return {"title": "Error", "content": f"Failed to generate final answer after 3 attempts. Error: {str(e)}"}
                else:
                    return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {str(e)}", "next_action": "final_answer"}
            time.sleep(1)  # Wait for 1 second before retrying

def generate_response(prompt):
    messages = [
        {"role": "system", "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.

Example of a valid JSON response:
```json
{
    "title": "Identifying Key Information",
    "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
    "next_action": "continue"
}```
"""},
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": "Thank you! I will now think step by step following my instructions, starting at the beginning after decomposing the problem."}
    ]

    steps = []
    step_count = 1
    total_thinking_time = 0

    while True:
        start_time = time.time()
        step_data = make_api_call(messages, 300)
        end_time = time.time()
        thinking_time = end_time - start_time
        total_thinking_time += thinking_time

        steps.append((f"Step {step_count}: {step_data['title']}", step_data['content'], thinking_time))

        messages.append({"role": "assistant", "content": json.dumps(step_data)})

        if step_data['next_action'] == 'final_answer':
            break

        step_count += 1

        # Yield after each step for Streamlit to update
        yield steps, None  # We're not yielding the total time until the end

    # Generate final answer
    messages.append({"role": "user", "content": "Please provide the final answer based on your reasoning above."})

    start_time = time.time()
    final_data = make_api_call(messages, 200, is_final_answer=True)
    end_time = time.time()
    thinking_time = end_time - start_time
    total_thinking_time += thinking_time

    steps.append(("Final Answer", final_data['content'], thinking_time))

    yield steps, total_thinking_time

def main():
    st.set_page_config(page_title="g1 prototype", page_icon="🧠", layout="wide")

    st.title("g1: Using Llama-3.1 70b on Groq to create o1-like reasoning chains")

    st.markdown("""
    This is an early prototype of using prompting to create o1-like reasoning chains to improve output accuracy. It is not perfect and accuracy has yet to be formally evaluated. It is powered by Groq so that the reasoning step is fast!

    Open source [repository here](https://github.com/bklieger-groq)
    """)

    # Text input for user query
    user_query = st.text_input("Enter your query:", placeholder="e.g., How many 'R's are in the word strawberry?")

    if user_query:
        st.write("Generating response...")

        # Create empty elements to hold the generated text and total time
        response_container = st.empty()
        time_container = st.empty()

        # Generate and display the response
        for steps, total_thinking_time in generate_response(user_query):
            with response_container.container():
                for i, (title, content, thinking_time) in enumerate(steps):
                    if title.startswith("Final Answer"):
                        st.markdown(f"### {title}")
                        st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
                    else:
                        with st.expander(title, expanded=True):
                            st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)

            # Only show total time when it's available at the end
            if total_thinking_time is not None:
                time_container.markdown(f"**Total thinking time: {total_thinking_time:.2f} seconds**")

if __name__ == "__main__":
    main()
~~~
gradio/app.py (deleted, 156 lines)

~~~python
import gradio as gr
import groq
import os
import json
import time

def make_api_call(client, messages, max_tokens, is_final_answer=False):
    for attempt in range(3):
        try:
            response = client.chat.completions.create(
                model="llama-3.1-70b-versatile",
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.2,
                response_format={"type": "json_object"}
            )
            return json.loads(response.choices[0].message.content)
        except Exception as e:
            if attempt == 2:
                if is_final_answer:
                    return {"title": "Error", "content": f"Failed to generate final answer after 3 attempts. Error: {str(e)}"}
                else:
                    return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {str(e)}", "next_action": "final_answer"}
            time.sleep(1)  # Wait for 1 second before retrying

def generate_response(client, prompt):
    messages = [
        {"role": "system", "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.

Example of a valid JSON response:
```json
{
    "title": "Identifying Key Information",
    "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
    "next_action": "continue"
}```
""" },
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": "Thank you! I will now think step by step following my instructions, starting at the beginning after decomposing the problem."}
    ]

    steps = []
    step_count = 1
    total_thinking_time = 0

    while True:
        start_time = time.time()
        step_data = make_api_call(client, messages, 300)
        end_time = time.time()
        thinking_time = end_time - start_time
        total_thinking_time += thinking_time

        # Handle potential errors
        if step_data.get('title') == "Error":
            steps.append((f"Step {step_count}: {step_data.get('title')}", step_data.get('content'), thinking_time))
            break

        step_title = f"Step {step_count}: {step_data.get('title', 'No Title')}"
        step_content = step_data.get('content', 'No Content')
        steps.append((step_title, step_content, thinking_time))

        messages.append({"role": "assistant", "content": json.dumps(step_data)})

        if step_data.get('next_action') == 'final_answer':
            break

        step_count += 1

    # Generate final answer
    messages.append({"role": "user", "content": "Please provide the final answer based on your reasoning above."})

    start_time = time.time()
    final_data = make_api_call(client, messages, 200, is_final_answer=True)
    end_time = time.time()
    thinking_time = end_time - start_time
    total_thinking_time += thinking_time

    if final_data.get('title') == "Error":
        steps.append(("Final Answer", final_data.get('content'), thinking_time))
    else:
        steps.append(("Final Answer", final_data.get('content', 'No Content'), thinking_time))

    return steps, total_thinking_time

def format_steps(steps, total_time):
    html_content = ""
    for title, content, thinking_time in steps:
        if title == "Final Answer":
            html_content += "<h3>{}</h3>".format(title)
            html_content += "<p>{}</p>".format(content.replace('\n', '<br>'))
        else:
            html_content += """
            <details>
                <summary><strong>{}</strong></summary>
                <p>{}</p>
                <p><em>Thinking time for this step: {:.2f} seconds</em></p>
            </details>
            <br>
            """.format(title, content.replace('\n', '<br>'), thinking_time)
    html_content += "<strong>Total thinking time: {:.2f} seconds</strong>".format(total_time)
    return html_content

def main(api_key, user_query):
    if not api_key:
        return "Please enter your Groq API key to proceed.", ""

    if not user_query:
        return "Please enter a query to get started.", ""

    try:
        # Initialize the Groq client with the provided API key
        client = groq.Groq(api_key=api_key)
    except Exception as e:
        return f"Failed to initialize Groq client. Error: {str(e)}", ""

    try:
        steps, total_time = generate_response(client, user_query)
        formatted_steps = format_steps(steps, total_time)
    except Exception as e:
        return f"An error occurred during processing. Error: {str(e)}", ""

    return formatted_steps, ""

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 g1: Using Llama-3.1 70b on Groq to Create O1-like Reasoning Chains")

    gr.Markdown("""
    This is an early prototype of using prompting to create O1-like reasoning chains to improve output accuracy. It is not perfect and accuracy has yet to be formally evaluated. It is powered by Groq so that the reasoning step is fast!

    Open source [repository here](https://github.com/bklieger-groq)
    """)

    with gr.Row():
        with gr.Column():
            api_input = gr.Textbox(
                label="Enter your Groq API Key:",
                placeholder="Your Groq API Key",
                type="password"
            )
            user_input = gr.Textbox(
                label="Enter your query:",
                placeholder="e.g., How many 'R's are in the word strawberry?",
                lines=2
            )
            submit_btn = gr.Button("Generate Response")

    with gr.Row():
        with gr.Column():
            output_html = gr.HTML()

    submit_btn.click(fn=main, inputs=[api_input, user_input], outputs=output_html)

# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()
~~~
Deleted 2-line requirements file (the file name is not shown here; from the README's Gradio instructions it is presumably gradio/requirements.txt):

~~~
groq
gradio
~~~
launcher.py (deleted, 123 lines)

~~~python
import blessed
import subprocess
import os
import sys
import time
from contextlib import contextmanager

term = blessed.Terminal()

MENU_ITEMS = [
    ("Ollama", "ol1.py", "Launch Ollama-based chat application"),
    ("Perplexity", "p1.py", "Launch Perplexity-based chat application"),
    ("Groq", "g1.py", "Launch Groq-based chat application"),
    ("Edit .env", "edit_env", "Edit environment variables"),
    ("Exit", None, "Exit the launcher")
]

@contextmanager
def fullscreen():
    with term.fullscreen(), term.cbreak(), term.hidden_cursor():
        yield

def draw_3d_box(y, x, height, width, color):
    shadow_color = term.color_rgb(50, 50, 50)

    # Draw shadow
    print(term.move(y+1, x+2) + shadow_color + '█' * (width-1) + term.normal)
    for i in range(height-1):
        print(term.move(y+2+i, x+width) + shadow_color + '█' + term.normal)

    # Draw main box
    print(term.move(y, x) + color + '╔' + '═' * (width - 2) + '╗' + term.normal)
    for i in range(height - 2):
        print(term.move(y + i + 1, x) + color + '║' + ' ' * (width - 2) + '║' + term.normal)
    print(term.move(y + height - 1, x) + color + '╚' + '═' * (width - 2) + '╝' + term.normal)

def draw_menu(current_option):
    menu_width = 50
    menu_height = len(MENU_ITEMS) * 3 + 5
    start_y = (term.height - menu_height) // 2
    start_x = (term.width - menu_width) // 2

    main_color = term.cornflower_blue
    draw_3d_box(start_y, start_x, menu_height, menu_width, main_color)

    title = '🚀 Launcher Menu 🚀'
    print(term.move(start_y + 1, start_x + (menu_width - len(title)) // 2) + term.bold + term.yellow(title))

    for i, (option, _, _) in enumerate(MENU_ITEMS):
        y = start_y + i * 3 + 4
        if i == current_option:
            item_color = term.black_on_yellow
            draw_3d_box(y-1, start_x+3, 3, menu_width-6, item_color)
            print(term.move(y, start_x + 5) + item_color + term.bold(f" {option:<{menu_width - 10}} ") + term.normal)
        else:
            item_color = term.white_on_blue
            draw_3d_box(y-1, start_x+3, 3, menu_width-6, item_color)
            print(term.move(y, start_x + 5) + item_color + term.bold(f" {option:<{menu_width - 10}} ") + term.normal)

    description = MENU_ITEMS[current_option][2]
    print(term.move(start_y + menu_height, start_x) + term.center(term.italic(description), menu_width))

def run_script(script):
    with fullscreen():
        print(term.clear + term.move_y(term.height // 2) + term.bold_green(term.center(f"Running {script}...")))
        time.sleep(1)

        process = subprocess.Popen(["streamlit", "run", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)

        with term.cbreak():
            print(term.clear)
            try:
                while True:
                    output = process.stdout.readline()
                    if output == '' and process.poll() is not None:
                        break
                    if output:
                        print(output.strip())
                    if term.inkey(timeout=0.1) == 'q':
                        process.terminate()
                        print(term.bold_red("\nScript terminated. Press any key to return to the launcher..."))
                        term.inkey()
                        return
            except KeyboardInterrupt:
                process.terminate()
                print(term.bold_red("\nScript terminated. Press any key to return to the launcher..."))
                term.inkey()
                return

        print(term.bold_green("\nScript finished. Press any key to return to the launcher..."))
        term.inkey()

def edit_env():
    os.system('clear')
    os.system("nano .env")

def main_menu():
    current_option = 0

    while True:
        with fullscreen():
            print(term.clear)
            draw_menu(current_option)

            key = term.inkey()

            if key.name == 'KEY_UP' and current_option > 0:
                current_option -= 1
            elif key.name == 'KEY_DOWN' and current_option < len(MENU_ITEMS) - 1:
                current_option += 1
            elif key.name == 'KEY_ENTER':
                selected_option = MENU_ITEMS[current_option][1]
                if selected_option is None:
                    return
                elif selected_option == "edit_env":
                    edit_env()
                else:
                    run_script(selected_option)
            elif key == 'q':
                return

if __name__ == "__main__":
    main_menu()
~~~
main.py (new file, 71 lines)

~~~python
import streamlit as st
from dotenv import load_dotenv
from api_handlers import OllamaHandler, PerplexityHandler, GroqHandler
from utils import generate_response, load_env_vars

# Load environment variables and configuration
load_dotenv()
config = load_env_vars()

def setup_page():
    st.set_page_config(page_title="multi1 - Unified AI Reasoning Chains", page_icon="🧠", layout="wide")
    st.markdown("""
    <h1 style='text-align: center; font-family: -apple-system, BlinkMacSystemFont, sans-serif;'>
        🧠 multi1 - Unified AI Reasoning Chains
    </h1>
    """, unsafe_allow_html=True)
    st.markdown("""
    <p style='text-align: center; font-family: -apple-system, BlinkMacSystemFont, sans-serif; font-size: 1.1em;'>
        This app demonstrates AI reasoning chains using different backends: Ollama, Perplexity AI, and Groq.
        Choose a backend and enter your query to see the step-by-step reasoning process.
    </p>
    """, unsafe_allow_html=True)

def get_api_handler(backend):
    if backend == "Ollama":
        return OllamaHandler(config['OLLAMA_URL'], config['OLLAMA_MODEL'])
    elif backend == "Perplexity AI":
        return PerplexityHandler(config['PERPLEXITY_API_KEY'], config['PERPLEXITY_MODEL'])
    else:  # Groq
        return GroqHandler()

def display_config(backend):
    st.sidebar.markdown("## 🛠️ Current Configuration")
    if backend == "Ollama":
        st.sidebar.markdown(f"- 🖥️ Ollama URL: `{config['OLLAMA_URL']}`")
        st.sidebar.markdown(f"- 🤖 Ollama Model: `{config['OLLAMA_MODEL']}`")
    elif backend == "Perplexity AI":
        st.sidebar.markdown(f"- 🧠 Perplexity AI Model: `{config['PERPLEXITY_MODEL']}`")
    else:  # Groq
        st.sidebar.markdown("- ⚡ Using Groq API")

def main():
    setup_page()

    st.sidebar.markdown("<h3 style='font-family: -apple-system, BlinkMacSystemFont, sans-serif;'>⚙️ Settings</h3>", unsafe_allow_html=True)
    backend = st.sidebar.selectbox("Choose AI Backend", ["Ollama", "Perplexity AI", "Groq"])
    display_config(backend)
    api_handler = get_api_handler(backend)

    user_query = st.text_input("💬 Enter your query:", placeholder="e.g., How many 'R's are in the word strawberry?")

    if user_query:
        st.write("🔍 Generating response...")
        response_container = st.empty()
        time_container = st.empty()

        for steps, total_thinking_time in generate_response(user_query, api_handler):
            with response_container.container():
                for title, content, _ in steps:
                    if title.startswith("Final Answer"):
                        st.markdown(f"<h3 style='font-family: -apple-system, BlinkMacSystemFont, sans-serif;'>🎯 {title}</h3>", unsafe_allow_html=True)
                        st.markdown(f"<div style='font-family: -apple-system, BlinkMacSystemFont, sans-serif;'>{content}</div>", unsafe_allow_html=True)
                    else:
                        with st.expander(f"📝 {title}", expanded=True):
                            st.markdown(f"<div style='font-family: -apple-system, BlinkMacSystemFont, sans-serif;'>{content}</div>", unsafe_allow_html=True)

            if total_thinking_time is not None:
                time_container.markdown(f"<p style='font-family: -apple-system, BlinkMacSystemFont, sans-serif;'><strong>⏱️ Total thinking time: {total_thinking_time:.2f} seconds</strong></p>", unsafe_allow_html=True)

if __name__ == "__main__":
    main()
~~~
ol1.py (deleted, 138 lines)

~~~python
import streamlit as st
import json
import time
import requests  # Add this import for making HTTP requests to Ollama
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Get configuration from .env file
OLLAMA_URL = os.getenv('OLLAMA_URL', 'http://localhost:11434')
OLLAMA_MODEL = os.getenv('OLLAMA_MODEL', 'llama3.1:70b')

def make_api_call(messages, max_tokens, is_final_answer=False):
    for attempt in range(3):
        try:
            response = requests.post(
                f"{OLLAMA_URL}/api/chat",
                json={
                    "model": OLLAMA_MODEL,
                    "messages": messages,
                    "stream": False,
                    "format": "json",  # important, or most of the time ollama does not generate a valid json response
                    "options": {
                        "num_predict": max_tokens,
                        "temperature": 0.2
                    }
                }
            )
            response.raise_for_status()
            return json.loads(response.json()["message"]["content"])
        except Exception as e:
            if attempt == 2:
                if is_final_answer:
                    return {"title": "Error", "content": f"Failed to generate final answer after 3 attempts. Error: {str(e)}"}
                else:
                    return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {str(e)}", "next_action": "final_answer"}
            time.sleep(1)  # Wait for 1 second before retrying

def generate_response(prompt):
    messages = [  # add two sentences to encourage a json format response
        {"role": "system", "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.

Example of a valid JSON response:
```json
{
    "title": "Identifying Key Information",
    "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
    "next_action": "continue"
}```.
You MUST response using the expected json schema, and your response must be valid json. This JSON response is essential for our job.
"""},
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": "Thank you! I will now think step by step following my instructions, starting at the beginning after decomposing the problem."}
    ]

    steps = []
    step_count = 1
    total_thinking_time = 0

    while True:
        start_time = time.time()
        step_data = make_api_call(messages, 300)
        end_time = time.time()
        thinking_time = end_time - start_time
        total_thinking_time += thinking_time

        steps.append((f"Step {step_count}: {step_data['title']}", step_data['content'], thinking_time))

        messages.append({"role": "assistant", "content": json.dumps(step_data)})

        if step_data['next_action'] == 'final_answer':
            break

        step_count += 1

        # Yield after each step for Streamlit to update
        yield steps, None  # We're not yielding the total time until the end

    # Generate final answer
    messages.append({"role": "user", "content": "Please provide the final answer based on your reasoning above."})

    start_time = time.time()
    final_data = make_api_call(messages, 200, is_final_answer=True)
    end_time = time.time()
    thinking_time = end_time - start_time
    total_thinking_time += thinking_time

    steps.append(("Final Answer", final_data['content'], thinking_time))

    yield steps, total_thinking_time

def main():
    st.set_page_config(page_title="ol1 prototype - Ollama version", page_icon="🧠", layout="wide")

    st.title("ol1: Using Ollama to create o1-like reasoning chains")

    st.markdown("""
    This is an early prototype of using prompting to create o1-like reasoning chains to improve output accuracy. It is not perfect and accuracy has yet to be formally evaluated. It is powered by Ollama so that the reasoning step is local!

    Forked from [bklieger-groq](https://github.com/bklieger-groq)
    Open source [repository here](https://github.com/tcsenpai/ol1-p1)
    """)

    st.markdown(f"**Current Configuration:**")
    st.markdown(f"- Ollama URL: `{OLLAMA_URL}`")
    st.markdown(f"- Ollama Model: `{OLLAMA_MODEL}`")

    # Text input for user query
    user_query = st.text_input("Enter your query:", placeholder="e.g., How many 'R's are in the word strawberry?")

    if user_query:
        st.write("Generating response...")

        # Create empty elements to hold the generated text and total time
        response_container = st.empty()
        time_container = st.empty()

        # Generate and display the response
        for steps, total_thinking_time in generate_response(user_query):
            with response_container.container():
                for i, (title, content, thinking_time) in enumerate(steps):
                    if title.startswith("Final Answer"):
                        st.markdown(f"### {title}")
                        # the replace() variant below does not work when the content contains code blocks
                        #st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)
                        st.markdown(content, unsafe_allow_html=True)
                    else:
                        with st.expander(title, expanded=True):
                            st.markdown(content.replace('\n', '<br>'), unsafe_allow_html=True)

            # Only show total time when it's available at the end
            if total_thinking_time is not None:
                time_container.markdown(f"**Total thinking time: {total_thinking_time:.2f} seconds**")

if __name__ == "__main__":
    main()
~~~
p1.py (deleted, 212 lines)

~~~python
import streamlit as st
import json
import time
import requests  # Add this import for making HTTP requests to Ollama
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Get configuration from .env file
PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY")
PERPLEXITY_MODEL = os.getenv("PERPLEXITY_MODEL", "llama-3.1-sonar-small-128k-online")

if not PERPLEXITY_API_KEY:
    raise ValueError("PERPLEXITY_API_KEY is not set in the .env file")


def make_api_call(messages, max_tokens, is_final_answer=False):
    for attempt in range(3):
        try:
            url = "https://api.perplexity.ai/chat/completions"

            payload = {"model": PERPLEXITY_MODEL, "messages": messages}
            headers = {
                "Authorization": f"Bearer {PERPLEXITY_API_KEY}",
                "Content-Type": "application/json",
            }

            print(f"payload: {payload}")

            response = requests.request("POST", url, json=payload, headers=headers)

            print(f"Response status code: {response.status_code}")
            print(f"Response content: {response.text}")

            response.raise_for_status()
            response_json = response.json()
            content = response_json["choices"][0]["message"]["content"]

            # Try to parse the content as JSON
            try:
                return json.loads(content)
            except json.JSONDecodeError:
                # If parsing fails, return the content as is
                return {
                    "title": "Raw Response",
                    "content": content,
                    "next_action": "final_answer" if is_final_answer else "continue"
                }

        except requests.exceptions.HTTPError as e:
            if response.status_code == 400:
                error_message = f"400 Bad Request: {response.text}"
                print(error_message)
                if attempt == 2:
                    return {
                        "title": "Error",
                        "content": error_message,
                        "next_action": "final_answer",
                    }
            else:
                # Handle other HTTP errors
                if attempt == 2:
                    error_message = f"HTTP error occurred: {str(e)}"
                    return {
                        "title": "Error",
                        "content": error_message,
                        "next_action": "final_answer",
                    }
        except json.JSONDecodeError:
            if attempt == 2:
                return {
                    "title": "Error",
                    "content": f"Failed to parse API response: {response.text}",
                    "next_action": "final_answer",
                }
        except requests.exceptions.RequestException as e:
            if attempt == 2:
                error_message = f"API request failed after 3 attempts. Error: {str(e)}"
                return {
                    "title": "Error",
                    "content": error_message,
                    "next_action": "final_answer",
                }
        time.sleep(1)  # Wait for 1 second before retrying


def generate_response(prompt):

    messages = [
        {
            "role": "system",
            "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.

Example of a valid JSON response:
```json
{
    "title": "Identifying Key Information",
    "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
    "next_action": "continue"
}```
""",
        },
        {"role": "user", "content": prompt},
    ]

    steps = []
    step_count = 1
    total_thinking_time = 0

    while True:
        start_time = time.time()
        step_data = make_api_call(messages, 300)
        end_time = time.time()
        thinking_time = end_time - start_time
        total_thinking_time += thinking_time

        steps.append(
            (
                f"Step {step_count}: {step_data['title']}",
                step_data["content"],
                thinking_time,
            )
        )

        messages.append({"role": "assistant", "content": json.dumps(step_data)})

        if step_data["next_action"] == "final_answer":
            break

        step_count += 1

        # Add a user message to maintain alternation
        messages.append({"role": "user", "content": "Continue with the next step."})

        # Yield after each step for Streamlit to update
        yield steps, None  # We're not yielding the total time until the end

    # Generate final answer
    messages.append(
        {
            "role": "user",
            "content": "Please provide the final answer based on your reasoning above.",
        }
    )

    start_time = time.time()
    final_data = make_api_call(messages, 200, is_final_answer=True)
    end_time = time.time()
    thinking_time = end_time - start_time
    total_thinking_time += thinking_time

    steps.append(("Final Answer", final_data["content"], thinking_time))

    yield steps, total_thinking_time


def main():
    st.set_page_config(page_title="p1 prototype - Perplexity version", page_icon="🧠", layout="wide")

    st.title("ol1: Using Perplexity AI to create o1-like reasoning chains")

    st.markdown(
        """
    This is an early prototype of using prompting to create o1-like reasoning chains to improve output accuracy. It is not perfect and accuracy has yet to be formally evaluated. It is powered by Perplexity AI API!

    Forked from [bklieger-groq](https://github.com/bklieger-groq)
    Open source [repository here](https://github.com/tcsenpai/ol1-p1)
    """
    )

    st.markdown(f"**Current Configuration:**")
    st.markdown(f"- Perplexity AI Model: `{PERPLEXITY_MODEL}`")

    # Text input for user query
    user_query = st.text_input(
        "Enter your query:",
        placeholder="e.g., How many 'R's are in the word strawberry?",
    )

    if user_query:
        st.write("Generating response...")

        # Create empty elements to hold the generated text and total time
        response_container = st.empty()
        time_container = st.empty()

        # Generate and display the response
        for steps, total_thinking_time in generate_response(user_query):
            with response_container.container():
                for i, (title, content, thinking_time) in enumerate(steps):
                    if title.startswith("Final Answer"):
                        st.markdown(f"### {title}")
                        st.markdown(
                            content.replace("\n", "<br>"), unsafe_allow_html=True
                        )
                    else:
                        with st.expander(title, expanded=True):
                            st.markdown(
                                content.replace("\n", "<br>"), unsafe_allow_html=True
                            )

            # Only show total time when it's available at the end
            if total_thinking_time is not None:
                time_container.markdown(
                    f"**Total thinking time: {total_thinking_time:.2f} seconds**"
                )


if __name__ == "__main__":
    main()
~~~
utils.py (new file, 73 lines)

~~~python
import json
import time
import os


def generate_response(prompt, api_handler):
    messages = [
        {
            "role": "system",
            "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.""",
        },
        {"role": "user", "content": prompt},
        {
            "role": "assistant",
            "content": "Thank you! I will now think step by step following my instructions, starting at the beginning after decomposing the problem.",
        },
    ]

    steps = []
    step_count = 1
    total_thinking_time = 0

    while True:
        start_time = time.time()
        step_data = api_handler.make_api_call(messages, 300)
        end_time = time.time()
        thinking_time = end_time - start_time
        total_thinking_time += thinking_time

        steps.append(
            (
                f"Step {step_count}: {step_data['title']}",
                step_data["content"],
                thinking_time,
            )
        )

        messages.append({"role": "assistant", "content": json.dumps(step_data)})
        print("Next reasoning step: ", step_data["next_action"])
        if step_data["next_action"].lower().strip() == "final_answer":
            break

        step_count += 1

        yield steps, None

    messages.append(
        {
            "role": "user",
            "content": "Please provide the final answer based on your reasoning above.",
        }
    )

    start_time = time.time()
    final_data = api_handler.make_api_call(messages, 200, is_final_answer=True)
    end_time = time.time()
    thinking_time = end_time - start_time
    total_thinking_time += thinking_time

    steps.append(("Final Answer", final_data["content"], thinking_time))

    yield steps, total_thinking_time


def load_env_vars():
    return {
        "OLLAMA_URL": os.getenv("OLLAMA_URL", "http://localhost:11434"),
        "OLLAMA_MODEL": os.getenv("OLLAMA_MODEL", "llama3.1:70b"),
        "PERPLEXITY_API_KEY": os.getenv("PERPLEXITY_API_KEY"),
        "PERPLEXITY_MODEL": os.getenv(
            "PERPLEXITY_MODEL", "llama-3.1-sonar-small-128k-online"
        ),
    }
~~~
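Because generate_response is a generator that yields (steps, total_thinking_time) tuples, it can also be driven from a plain script instead of the Streamlit UI in main.py. A minimal sketch under the same assumption as above (a local Ollama server with the default model from load_env_vars); the query string is only an example:

~~~python
# Minimal sketch: consume utils.generate_response without Streamlit.
# Assumptions: Ollama is running locally and serves the default model.
from api_handlers import OllamaHandler
from utils import generate_response, load_env_vars

config = load_env_vars()
handler = OllamaHandler(config["OLLAMA_URL"], config["OLLAMA_MODEL"])

final_steps, total_time = None, None
# Each intermediate yield carries (steps_so_far, None); the last yield carries the total time.
for steps, total_thinking_time in generate_response("How many 'R's are in the word strawberry?", handler):
    final_steps, total_time = steps, total_thinking_time

for title, content, thinking_time in final_steps:
    print(f"{title} ({thinking_time:.2f}s)\n{content}\n")
print(f"Total thinking time: {total_time:.2f} seconds")
~~~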