Merge pull request #120 from Fosowl/dev

README update + avoid asking for clarification multiple times
Martin 2025-04-14 14:31:32 +02:00 committed by GitHub
commit aad1b426f0
4 changed files with 14 additions and 8 deletions

@@ -321,7 +321,7 @@ Example config:
 [MAIN]
 is_local = True
 provider_name = ollama
-provider_model = deepseek-r1:1.5b
+provider_model = deepseek-r1:32b
 provider_server_address = 127.0.0.1:11434
 agent_name = Friday
 recover_last_session = False
@@ -339,7 +339,7 @@ stealth_mode = False
 - is_local -> Runs the agent locally (True) or on a remote server (False).
 - provider_name -> The provider to use (one of: `ollama`, `server`, `lm-studio`, `deepseek-api`)
-- provider_model -> The model used, e.g., deepseek-r1:1.5b.
+- provider_model -> The model used, e.g., deepseek-r1:32b.
 - provider_server_address -> Server address, e.g., 127.0.0.1:11434 for local. Set to anything for non-local API.
 - agent_name -> Name of the agent, e.g., Friday. Used as a trigger word for TTS.
 - recover_last_session -> Restarts from last session (True) or not (False).
@@ -410,9 +410,12 @@ If this section is incomplete please raise an issue.
 **Q: What hardware do I need?**
-7B Model: GPU with 8GB VRAM.
-14B Model: 12GB GPU (e.g., RTX 3060).
-32B Model: 24GB+ VRAM.
+| Model Size | GPU | Comment |
+|------------|-----|---------|
+| 7B | 8 GB VRAM | ⚠️ Not recommended. Performance is poor, hallucinations are frequent, and planner agents will likely fail. |
+| 14B | 12 GB VRAM (e.g. RTX 3060) | ✅ Usable for simple tasks. May struggle with web browsing and planning tasks. |
+| 32B | 24+ GB VRAM (e.g. RTX 4090) | 🚀 Succeeds at most tasks, might still struggle with task planning. |
+| 70B+ | 48+ GB VRAM (e.g. two RTX 4090s) | 💪 Excellent. Recommended for advanced use cases. |
 **Q: Why Deepseek R1 over other models?**

@@ -341,7 +341,7 @@ class BrowserAgent(Agent):
                 complete = True
                 break
-            if link == None or Action.GO_BACK.value in answer or link in self.search_history:
+            if (link == None and not len(extracted_form)) or Action.GO_BACK.value in answer or link in self.search_history:
                 pretty_print(f"Going back to results. Still {len(unvisited)}", color="status")
                 unvisited = self.select_unvisited(search_result)
                 prompt = self.make_newsearch_prompt(user_prompt, unvisited)
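
For context on this change: previously the agent retreated to the search results whenever no link could be parsed from the answer, even if the current page had yielded a fillable form; the new guard only goes back when there is neither a link nor an extracted form. A minimal sketch of the corrected logic, where `link`, `extracted_form`, `answer`, and `search_history` are hypothetical stand-ins for the agent's real state, not the actual class:

```python
# Sketch of the tightened "go back to results" guard.
# All names are stand-ins for BrowserAgent state, not the real class.
from enum import Enum

class Action(Enum):
    GO_BACK = "GO_BACK"

def should_return_to_results(link, extracted_form, answer, search_history):
    # Old guard: `link == None` alone forced a retreat, discarding pages
    # that exposed a form. New guard: retreat only if there is neither a
    # link to follow nor a form to fill.
    nothing_left = link is None and len(extracted_form) == 0
    return nothing_left or Action.GO_BACK.value in answer or link in search_history

# A page with a form but no link no longer triggers a retreat:
assert not should_return_to_results(None, [{"field": "q"}], "filling the form", [])
# A dead page (no link, no form) still does:
assert should_return_to_results(None, [], "nothing useful here", [])
```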

@@ -332,6 +332,6 @@ class Provider:
         return thought
 
 if __name__ == "__main__":
-    provider = Provider("server", "deepseek-r1:32b", " 172.81.127.6:8080")
+    provider = Provider("server", "deepseek-r1:32b", " x.x.x.x:8080")
     res = provider.respond(["user", "Hello, how are you?"])
     print("Response:", res)

@@ -29,6 +29,7 @@ class AgentRouter:
         self.complexity_classifier = self.load_llm_router()
         self.learn_few_shots_tasks()
         self.learn_few_shots_complexity()
+        self.asked_clarify = False
 
     def load_pipelines(self) -> Dict[str, Type[pipeline]]:
         """
@@ -439,9 +440,11 @@ class AgentRouter:
         text = self.lang_analysis.translate(text, lang)
         labels = [agent.role for agent in self.agents]
         complexity = self.estimate_complexity(text)
-        if complexity == None:
+        if complexity == None and self.asked_clarify == False:
+            self.asked_clarify = True
             pretty_print(f"Humm, the task seem complex but you gave very little information. can you clarify?", color="info")
             return None
+        self.asked_clarify = False
         if complexity == "HIGH":
             pretty_print(f"Complex task detected, routing to planner agent.", color="info")
             return self.find_planner_agent()
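
The router change is easiest to read as a one-shot latch: the clarification prompt fires at most once for consecutive unclassifiable requests, then resets as soon as routing proceeds. A self-contained sketch of the pattern; the class is not the real `AgentRouter` and `estimate_complexity` is stubbed here, only the flag pattern mirrors the commit:

```python
# Sketch of the one-shot clarification latch; names and the complexity
# stub are hypothetical, only the flag pattern mirrors the commit.
from typing import Optional

class RouterSketch:
    def __init__(self) -> None:
        self.asked_clarify = False  # True once we have already asked

    def estimate_complexity(self, text: str) -> Optional[str]:
        # Stand-in for the real classifier: very short inputs are ambiguous.
        if len(text.split()) < 3:
            return None
        return "HIGH" if "plan" in text else "LOW"

    def route(self, text: str) -> Optional[str]:
        complexity = self.estimate_complexity(text)
        if complexity is None and not self.asked_clarify:
            self.asked_clarify = True      # ask once...
            print("Can you clarify your request?")
            return None                    # ...and stop for this turn
        self.asked_clarify = False         # reset; never ask twice in a row
        return "planner" if complexity == "HIGH" else "casual"

router = RouterSketch()
assert router.route("do it") is None      # vague input: ask for clarification once
assert router.route("do it") == "casual"  # still vague: route anyway, no re-ask
```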