fixed a path error + modularized

This commit is contained in:
tcsenpai 2024-09-17 21:13:29 +02:00
parent 311a292c57
commit 7ed4e99d7f
6 changed files with 150 additions and 127 deletions

View File

@ -1,6 +1,4 @@
import json
import requests
import groq
import time
from abc import ABC, abstractmethod
@ -38,125 +36,7 @@ class BaseHandler(ABC):
"next_action": "final_answer" if is_final_answer else "continue"
}
# Handler for Ollama API
class OllamaHandler(BaseHandler):
    """Adapter for a local Ollama server's /api/chat endpoint."""

    def __init__(self, url, model):
        super().__init__()
        self.url = url      # base URL of the Ollama server
        self.model = model  # model name passed through to the API

    def _make_request(self, messages, max_tokens):
        """POST the chat messages to Ollama and return the reply content string.

        Raises requests.HTTPError on a non-2xx response.
        """
        response = requests.post(
            f"{self.url}/api/chat",
            json={
                "model": self.model,
                "messages": messages,
                "stream": False,
                "format": "json",  # ask Ollama to emit JSON-formatted content
                "options": {
                    "num_predict": max_tokens,
                    "temperature": 0.2,
                },
            },
        )
        response.raise_for_status()
        # Parse the body once (the original parsed it twice and also printed
        # the full payload to stdout — debug leftover, removed).
        data = response.json()
        return data["message"]["content"]

    def _process_response(self, response, is_final_answer):
        """Normalize an Ollama reply into the step dict the caller expects."""
        # The argument may be either the raw response dict or the already
        # extracted content string; handle both.
        if isinstance(response, dict) and 'message' in response:
            content = response['message']['content']
        else:
            content = response
        try:
            parsed_content = json.loads(content)
            if 'final_answer' in parsed_content:
                return {
                    "title": "Final Answer",
                    "content": parsed_content['final_answer'],
                    "next_action": "final_answer"
                }
            return parsed_content
        except json.JSONDecodeError:
            # Content was not valid JSON: pass the raw text through.
            return {
                "title": "Raw Response",
                "content": content,
                "next_action": "final_answer" if is_final_answer else "continue"
            }
# Handler for Perplexity API
class PerplexityHandler(BaseHandler):
    """Adapter for the Perplexity chat-completions API."""

    def __init__(self, api_key, model):
        super().__init__()
        self.api_key = api_key
        self.model = model

    def _clean_messages(self, messages):
        """Merge consecutive user messages and drop a trailing assistant turn.

        Consecutive same-role user messages are concatenated with a newline;
        system messages are always kept. NOTE(review): consecutive assistant
        messages are silently dropped by this logic — presumably intentional
        for Perplexity's alternating-role requirement; confirm.
        """
        cleaned_messages = []
        last_role = None
        for message in messages:
            if message["role"] == "system":
                cleaned_messages.append(message)
            elif message["role"] != last_role:
                cleaned_messages.append(message)
                last_role = message["role"]
            elif message["role"] == "user":
                # Fold repeated user turns into the previous user message.
                cleaned_messages[-1]["content"] += "\n" + message["content"]
        # Remove the last assistant message if present
        if cleaned_messages and cleaned_messages[-1]["role"] == "assistant":
            cleaned_messages.pop()
        return cleaned_messages

    def _make_request(self, messages, max_tokens):
        """POST to Perplexity and return the assistant message content.

        Raises:
            ValueError: with the API's error message on HTTP 400.
            requests.HTTPError: re-raised for any other HTTP error status.
        """
        cleaned_messages = self._clean_messages(messages)
        url = "https://api.perplexity.ai/chat/completions"
        payload = {"model": self.model, "messages": cleaned_messages}
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        try:
            response = requests.post(url, json=payload, headers=headers)
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]
        except requests.exceptions.HTTPError as http_err:
            if response.status_code == 400:
                error_message = response.json().get("error", {}).get("message", "Unknown error")
                # Chain the original HTTPError (it was previously bound but unused).
                raise ValueError(f"Bad request (400): {error_message}") from http_err
            raise  # Re-raise the exception if it's not a 400 error

    def _process_response(self, response, is_final_answer):
        """Delegate to the base parser; fall back to the raw text on bad JSON."""
        try:
            return super()._process_response(response, is_final_answer)
        except json.JSONDecodeError:
            print("Warning: content is not a valid JSON, returning raw response")
            # Honor an explicit final-answer directive embedded in the raw text.
            forced_final_answer = '"next_action": "final_answer"' in response.lower().strip()
            return {
                "title": "Raw Response",
                "content": response,
                "next_action": "final_answer" if (is_final_answer or forced_final_answer) else "continue"
            }
# Handler for Groq API
class GroqHandler(BaseHandler):
    """Adapter for the Groq chat-completions API.

    NOTE(review): groq.Groq() is constructed without an explicit key —
    presumably it reads credentials from the environment; confirm.
    """

    def __init__(self, model="llama-3.1-70b-versatile"):
        super().__init__()
        self.client = groq.Groq()
        # Generalized: the model is now a parameter; the default preserves
        # the previously hard-coded value, so existing callers are unchanged.
        self.model = model

    def _make_request(self, messages, max_tokens):
        """Request a JSON-formatted chat completion and return its content."""
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=0.2,
            response_format={"type": "json_object"}  # force JSON output
        )
        return response.choices[0].message.content
# Import derived handlers
from handlers.ollama_handler import OllamaHandler
from handlers.perplexity_handler import PerplexityHandler
from handlers.groq_handler import GroqHandler

5
app/handlers/__init__.py Normal file
View File

@ -0,0 +1,5 @@
from .ollama_handler import OllamaHandler
from .perplexity_handler import PerplexityHandler
from .groq_handler import GroqHandler
__all__ = ['OllamaHandler', 'PerplexityHandler', 'GroqHandler']

View File

@ -0,0 +1,19 @@
import groq
from api_handlers import BaseHandler
class GroqHandler(BaseHandler):
    """Adapter for the Groq chat-completions API."""

    def __init__(self, api_key, model):
        super().__init__()
        self.model = model
        self.client = groq.Groq(api_key=api_key)

    def _make_request(self, messages, max_tokens):
        """Request a JSON-formatted chat completion and return its text content."""
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=0.2,
            response_format={"type": "json_object"},
        )
        return completion.choices[0].message.content

View File

@ -0,0 +1,51 @@
import json
import requests
from api_handlers import BaseHandler
class OllamaHandler(BaseHandler):
    """Adapter for a local Ollama server's /api/chat endpoint."""

    def __init__(self, url, model):
        super().__init__()
        self.url = url      # base URL of the Ollama server
        self.model = model  # model name passed through to the API

    def _make_request(self, messages, max_tokens):
        """POST the chat messages to Ollama and return the reply content string.

        Raises requests.HTTPError on a non-2xx response.
        """
        response = requests.post(
            f"{self.url}/api/chat",
            json={
                "model": self.model,
                "messages": messages,
                "stream": False,
                "format": "json",  # ask Ollama to emit JSON-formatted content
                "options": {
                    "num_predict": max_tokens,
                    "temperature": 0.2,
                },
            },
        )
        response.raise_for_status()
        # Parse the body once (the original parsed it twice and also printed
        # the full payload to stdout — debug leftover, removed).
        data = response.json()
        return data["message"]["content"]

    def _process_response(self, response, is_final_answer):
        """Normalize an Ollama reply into the step dict the caller expects."""
        # The argument may be either the raw response dict or the already
        # extracted content string; handle both.
        if isinstance(response, dict) and 'message' in response:
            content = response['message']['content']
        else:
            content = response
        try:
            parsed_content = json.loads(content)
            if 'final_answer' in parsed_content:
                return {
                    "title": "Final Answer",
                    "content": parsed_content['final_answer'],
                    "next_action": "final_answer"
                }
            return parsed_content
        except json.JSONDecodeError:
            # Content was not valid JSON: pass the raw text through.
            return {
                "title": "Raw Response",
                "content": content,
                "next_action": "final_answer" if is_final_answer else "continue"
            }

View File

@ -0,0 +1,59 @@
import json
import requests
from api_handlers import BaseHandler
class PerplexityHandler(BaseHandler):
    """Adapter for the Perplexity chat-completions API."""

    def __init__(self, api_key, model):
        super().__init__()
        self.api_key = api_key
        self.model = model

    def _clean_messages(self, messages):
        """Merge consecutive user messages and drop a trailing assistant turn.

        Consecutive same-role user messages are concatenated with a newline;
        system messages are always kept. NOTE(review): consecutive assistant
        messages are silently dropped by this logic — presumably intentional
        for Perplexity's alternating-role requirement; confirm.
        """
        cleaned_messages = []
        last_role = None
        for message in messages:
            if message["role"] == "system":
                cleaned_messages.append(message)
            elif message["role"] != last_role:
                cleaned_messages.append(message)
                last_role = message["role"]
            elif message["role"] == "user":
                # Fold repeated user turns into the previous user message.
                cleaned_messages[-1]["content"] += "\n" + message["content"]
        # Remove the last assistant message if present
        if cleaned_messages and cleaned_messages[-1]["role"] == "assistant":
            cleaned_messages.pop()
        return cleaned_messages

    def _make_request(self, messages, max_tokens):
        """POST to Perplexity and return the assistant message content.

        Raises:
            ValueError: with the API's error message on HTTP 400.
            requests.HTTPError: re-raised for any other HTTP error status.
        """
        cleaned_messages = self._clean_messages(messages)
        url = "https://api.perplexity.ai/chat/completions"
        payload = {"model": self.model, "messages": cleaned_messages}
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        try:
            response = requests.post(url, json=payload, headers=headers)
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]
        except requests.exceptions.HTTPError as http_err:
            if response.status_code == 400:
                error_message = response.json().get("error", {}).get("message", "Unknown error")
                # Chain the original HTTPError (it was previously bound but unused).
                raise ValueError(f"Bad request (400): {error_message}") from http_err
            raise  # Re-raise the exception if it's not a 400 error

    def _process_response(self, response, is_final_answer):
        """Delegate to the base parser; fall back to the raw text on bad JSON."""
        try:
            return super()._process_response(response, is_final_answer)
        except json.JSONDecodeError:
            print("Warning: content is not a valid JSON, returning raw response")
            # Honor an explicit final-answer directive embedded in the raw text.
            forced_final_answer = '"next_action": "final_answer"' in response.lower().strip()
            return {
                "title": "Raw Response",
                "content": response,
                "next_action": "final_answer" if (is_final_answer or forced_final_answer) else "continue"
            }

View File

@ -2,10 +2,19 @@ import json
import time
import os
def generate_response(prompt, api_handler):
def generate_response(prompt, api_handler):# Get the absolute path to the system_prompt.txt file
current_dir = os.path.dirname(os.path.abspath(__file__))
system_prompt_path = os.path.join(current_dir, 'system_prompt.txt')
# Load the system prompt from an external file
with open('system_prompt.txt', 'r') as file:
try:
with open(system_prompt_path, 'r') as file:
SYSTEM_PROMPT = file.read()
except FileNotFoundError:
print(f"Error: system_prompt.txt not found at {system_prompt_path}")
os._exit(-1)
# Initialize the conversation with system prompt, user input, and an initial assistant response
messages = [