Merge pull request #42 from Fosowl/dev

Feat: better error handling in llm_provider

commit e358fd4df7
.env.example (new file, +3)
@@ -0,0 +1,3 @@
+OPENAI_API_KEY='dont share this, not needed for local providers'
+SERPAPI_KEY='dont share this, needed for internet search'
+AVIATIONSTACK_API_KEY='not needed if you dont search for flight'
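For context, a minimal sketch of how such a .env file is consumed at runtime; the diff below already imports load_dotenv from python-dotenv, and the variable names mirror .env.example:

# Sketch only: reads .env from the working directory into the environment.
import os
from dotenv import load_dotenv

load_dotenv()
openai_key = os.getenv("OPENAI_API_KEY")  # None if unset, e.g. for local providers
if openai_key is None:
    print("OPENAI_API_KEY not set; only local providers will work.")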
@@ -34,7 +34,7 @@ class Browser:
             self.logger.info("Browser initialized successfully")
         except Exception as e:
             raise Exception(f"Failed to initialize browser: {str(e)}")

     def go_to(self, url):
         """Navigate to a specified URL."""
         try:
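The wrap-and-rethrow pattern shown in this hunk, as a standalone sketch (the real __init__ sets up the web driver, which is elided here):

import logging

class Browser:
    """Sketch of the init pattern above; driver setup elided."""
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        try:
            # ... web driver setup would go here ...
            self.logger.info("Browser initialized successfully")
        except Exception as e:
            # Re-raise with context so callers see why startup failed.
            raise Exception(f"Failed to initialize browser: {str(e)}")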
@@ -10,6 +10,7 @@ from dotenv import load_dotenv, set_key
 from openai import OpenAI
 from huggingface_hub import InferenceClient
 import os
+import httpx

 class Provider:
     def __init__(self, provider_name, model, server_address = "127.0.0.1:5000"):
@@ -31,6 +32,9 @@ class Provider:
             self.get_api_key(self.provider_name)
         elif self.server != "":
             print("Provider", provider_name, "initialized at", self.server)
+            self.check_address_format(self.server)
+            if not self.is_ip_online(self.server.split(':')[0]):
+                raise Exception(f"Server at {self.server} is offline.")

     def get_api_key(self, provider):
         load_dotenv()
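With this guard, a bad address now fails at construction time rather than on the first generation call. A usage sketch (provider name, model, and address are illustrative; the import path is assumed, not shown in this diff):

from llm_provider import Provider  # assumed module name; adjust to the repo's layout

try:
    provider = Provider("server", "deepseek-r1:14b", server_address="10.0.0.99:5000")
except Exception as e:
    # e.g. "Server at 10.0.0.99:5000 is offline." before any request is attempted
    print(f"Could not start provider: {e}")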
@@ -53,7 +57,7 @@ class Provider:
             if not port.isdigit() or not (0 <= int(port) <= 65535):
                 raise ValueError("Port must be a number between 0 and 65535.")
         except ValueError as e:
-            raise Exception(f"Invalid address format: {e}")
+            raise Exception(f"Invalid address format: {e}. Is port specified?")
         return address

     def respond(self, history, verbose = True):
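Only part of check_address_format appears in this hunk; a self-contained sketch consistent with it (the upstream body may differ):

def check_address_format(address: str) -> str:
    """Validate an 'ip:port' string; sketch matching the hunk above."""
    try:
        # Assumption: one host part and one port part, split on the last colon.
        # A missing colon raises ValueError, which the except below converts.
        ip, port = address.rsplit(':', 1)
        if not port.isdigit() or not (0 <= int(port) <= 65535):
            raise ValueError("Port must be a number between 0 and 65535.")
    except ValueError as e:
        raise Exception(f"Invalid address format: {e}. Is port specified?")
    return address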
@@ -61,7 +65,14 @@ class Provider:
         Use the choosen provider to generate text.
         """
         llm = self.available_providers[self.provider_name]
-        thought = llm(history, verbose)
+        try:
+            thought = llm(history, verbose)
+        except ConnectionError as e:
+            raise ConnectionError(f"{str(e)}\nConnection to {self.server} failed.")
+        except AttributeError as e:
+            raise NotImplementedError(f"{str(e)}\nIs {self.provider_name} implemented ?")
+        except Exception as e:
+            raise Exception(f"Provider {self.provider_name} failed: {str(e)}") from e
         return thought

     def is_ip_online(self, ip_address):
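The effect of the wrapper, sketched from the caller's side: errors can be branched on by type instead of parsing raw client-library tracebacks. The history format is assumed to be chat-style dicts, matching what server_fn posts:

# `provider` is a constructed Provider instance as in the earlier sketch.
history = [{"role": "user", "content": "Hello"}]
try:
    answer = provider.respond(history)
except ConnectionError as e:
    print(f"Server unreachable: {e}")        # wraps the original error message
except NotImplementedError as e:
    print(f"Provider not implemented: {e}")  # e.g. a typo'd provider name in config.ini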
@@ -71,20 +82,22 @@ class Provider:
         param = '-n' if platform.system().lower() == 'windows' else '-c'
         command = ['ping', param, '1', ip_address]
         try:
-            output = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=5)
+            output = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=15)
             if output.returncode == 0:
                 return True
             else:
+                print("errorcode:", output)
                 return False
         except subprocess.TimeoutExpired:
+            print("timeout")
             return True
         except Exception as e:
-            print(f"An error occurred: {e}")
+            print(f"is_ip_online error:\n{e}")
             return False

     def server_fn(self, history, verbose = False):
         """
-        Use a remote server wit LLM to generate text.
+        Use a remote server with LLM to generate text.
         """
         thought = ""
         route_start = f"http://{self.server}/generate"
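A note on this design: ICMP ping can be blocked by firewalls while the LLM port is open, and the code treats a ping timeout as online (return True in the TimeoutExpired branch). A TCP connect against the actual service port is a common alternative; a sketch, not what this PR implements:

import socket

def is_port_open(host: str, port: int, timeout: float = 5.0) -> bool:
    """Alternative liveness check: try a TCP connect to the service port itself."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False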
@@ -92,13 +105,18 @@ class Provider:
         if not self.is_ip_online(self.server.split(":")[0]):
             raise Exception(f"Server is offline at {self.server}")

-        requests.post(route_start, json={"messages": history})
-        is_complete = False
-        while not is_complete:
-            response = requests.get(f"http://{self.server}/get_updated_sentence")
-            thought = response.json()["sentence"]
-            is_complete = bool(response.json()["is_complete"])
-            time.sleep(2)
+        try:
+            requests.post(route_start, json={"messages": history})
+            is_complete = False
+            while not is_complete:
+                response = requests.get(f"http://{self.server}/get_updated_sentence")
+                thought = response.json()["sentence"]
+                is_complete = bool(response.json()["is_complete"])
+                time.sleep(2)
+        except KeyError as e:
+            raise f"{str(e)}\n\nError occured with server route. Are you using the correct address for the config.ini provider?"
+        except Exception as e:
+            raise e
         return thought

     def ollama_fn(self, history, verbose = False):
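Two observations on this hunk. First, `raise f"..."` in the KeyError branch raises a plain string, which is itself a TypeError in Python 3; `raise Exception(f"...")` is presumably intended. Second, the client implies a small HTTP contract: POST /generate starts a job and GET /get_updated_sentence returns {"sentence": ..., "is_complete": ...}. A minimal stub of that contract for local testing (Flask is an assumption for illustration; the real server may differ):

from flask import Flask, jsonify, request

app = Flask(__name__)
state = {"sentence": "", "is_complete": False}

@app.route("/generate", methods=["POST"])
def generate():
    history = request.json["messages"]   # same payload server_fn posts
    state["sentence"] = "stub reply"     # a real server streams tokens here
    state["is_complete"] = True
    return jsonify({"status": "started"})

@app.route("/get_updated_sentence")
def get_updated_sentence():
    return jsonify(state)                # keys match what server_fn reads

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)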
@@ -116,11 +134,14 @@ class Provider:
                 if verbose:
                     print(chunk['message']['content'], end='', flush=True)
                 thought += chunk['message']['content']
+        except httpx.ConnectError as e:
+            raise Exception("\nOllama connection failed. provider should not be set to ollama if server address is not localhost") from e
         except ollama.ResponseError as e:
             if e.status_code == 404:
+                print(f"Downloading {self.model}...")
                 ollama.pull(self.model)
-            if "refused" in str(e):
-                raise Exception("Ollama connection failed. is the server running ?")
+            if "refused" in str(e).lower():
+                raise Exception("Ollama connection failed. is the server running ?") from e
             raise e
         return thought

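One subtlety in the 404 branch: after ollama.pull(self.model) the original request is not retried, so the call that triggers the download still raises. A sketch of a retry-once variant (a possible follow-up, not this PR's code):

import ollama

def chat_with_autopull(model: str, history: list) -> str:
    """Sketch: pull a missing model, then retry the chat exactly once."""
    for attempt in range(2):
        try:
            stream = ollama.chat(model=model, messages=history, stream=True)
            return "".join(chunk['message']['content'] for chunk in stream)
        except ollama.ResponseError as e:
            if e.status_code == 404 and attempt == 0:
                ollama.pull(model)  # download the missing model, then loop to retry
            else:
                raise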
@@ -155,7 +176,7 @@ class Provider:
                 print(thought)
             return thought
         except Exception as e:
-            raise Exception(f"OpenAI API error: {e}")
+            raise Exception(f"OpenAI API error: {str(e)}") from e

     def test_fn(self, history, verbose = True):
         """
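The `from e` chaining used throughout this PR keeps the original error attached to the wrapper. A minimal demonstration:

def flaky():
    raise ValueError("low-level detail")

try:
    try:
        flaky()
    except Exception as e:
        raise Exception("OpenAI API error: wrapped") from e
except Exception as wrapped:
    print(wrapped)            # the high-level message
    print(wrapped.__cause__)  # the original ValueError, preserved by `from e`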
@@ -76,8 +76,9 @@ class Speech():
         return parts[-1] if parts else path

     def shorten_paragraph(self, sentence):
+        #TODO find a better way, we would like to have the TTS not be annoying, speak only useful informations
         """
-        Shorten paragraph like **explaination**: <long text> by keeping only the first sentence.
+        Find long paragraph like **explaination**: <long text> by keeping only the first sentence.
         Args:
             sentence (str): The sentence to shorten
         Returns:
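The docstring describes the intent; a hypothetical body consistent with it, keeping only the first sentence of the paragraph handed to the TTS (the upstream implementation is not shown here):

def shorten_paragraph(sentence: str) -> str:
    """Hypothetical sketch: keep only the first sentence of a long paragraph."""
    first, sep, _rest = sentence.partition('. ')
    return first + ('.' if sep else '')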