From e06acd65a64fa51113e003906e6963647156b4fc Mon Sep 17 00:00:00 2001
From: martin legrand
Date: Sat, 26 Apr 2025 10:42:29 +0200
Subject: [PATCH] fix : http added to url unnecessary

---
 sources/llm_provider.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sources/llm_provider.py b/sources/llm_provider.py
index 30a57a8..a04d620 100644
--- a/sources/llm_provider.py
+++ b/sources/llm_provider.py
@@ -108,8 +108,8 @@ class Provider:
         Use a remote server with LLM to generate text.
         """
         thought = ""
-        route_setup = f"http://{self.server_ip}/setup"
-        route_gen = f"http://{self.server_ip}/generate"
+        route_setup = f"{self.server_ip}/setup"
+        route_gen = f"{self.server_ip}/generate"

         if not self.is_ip_online(self.server_ip):
             pretty_print(f"Server is offline at {self.server_ip}", color="failure")
@@ -120,7 +120,7 @@ class Provider:
         is_complete = False
         while not is_complete:
             try:
-                response = requests.get(f"http://{self.server_ip}/get_updated_sentence")
+                response = requests.get(f"{self.server_ip}/get_updated_sentence")
                 if "error" in response.json():
                     pretty_print(response.json()["error"], color="failure")
                     break
@@ -276,7 +276,7 @@ class Provider:
         lm studio use endpoint /v1/chat/completions not /chat/completions like openai
         """
         thought = ""
-        route_start = f"http://{self.server_ip}/v1/chat/completions"
+        route_start = f"{self.server_ip}/v1/chat/completions"
         payload = {
             "messages": history,
             "temperature": 0.7,
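
Note on the change above: routes are now built as f"{self.server_ip}/setup" and so on, which means self.server_ip must already carry its scheme (for example "http://127.0.0.1:5000"). A bare host:port value would yield a URL like "127.0.0.1:5000/setup", which requests rejects with MissingSchema. Below is a minimal sketch of a guard that could normalize the configured address before it is stored; normalize_base_url is a hypothetical helper, not part of this patch:

    # Hypothetical helper, not part of this patch: keeps scheme-less
    # configs working now that "http://" is no longer prepended.
    def normalize_base_url(address: str) -> str:
        address = address.rstrip("/")  # avoid "//" when appending /setup, /generate, ...
        if address.startswith(("http://", "https://")):
            return address
        return f"http://{address}"  # assume plain HTTP for bare host:port values

    # Usage with made-up addresses:
    #   normalize_base_url("https://myserver:3333/")  -> "https://myserver:3333"
    #   normalize_base_url("127.0.0.1:5000")          -> "http://127.0.0.1:5000"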