Mirror of https://github.com/tcsenpai/agenticSeek.git (synced 2025-06-05 02:25:27 +00:00)

fix : web front bugs

This commit is contained in:
parent 83c595144b
commit c030b55521

api.py (13 changes)
@@ -5,6 +5,7 @@ import uvicorn
 import aiofiles
 import configparser
 import asyncio
+import time
 from typing import List
 from fastapi import FastAPI
 from fastapi.responses import JSONResponse
@@ -136,7 +137,9 @@ async def get_latest_answer():
         "answer": interaction.current_agent.last_answer,
         "agent_name": interaction.current_agent.agent_name if interaction.current_agent else "None",
         "success": "false",
-        "blocks": {f'{i}': block.jsonify() for i, block in enumerate(interaction.current_agent.get_blocks_result())} if interaction.current_agent else {}
+        "blocks": {f'{i}': block.jsonify() for i, block in enumerate(interaction.current_agent.get_blocks_result())} if interaction.current_agent else {},
+        "status": interaction.current_agent.status_message if interaction.current_agent else "No status available",
+        "timestamp": str(time.time())
     }
     query_resp_history.append(query_resp)
     return JSONResponse(status_code=200, content=query_resp)
@@ -171,7 +174,9 @@ async def process_query(request: QueryRequest):
         answer="Waiting for agent...",
         agent_name="Waiting for agent...",
         success="false",
-        blocks={}
+        blocks={},
+        status="Waiting for agent...",
+        timestamp=str(time.time())
     )
     if is_generating:
         logger.warning("Another query is being processed, please wait.")
@@ -208,7 +213,9 @@ async def process_query(request: QueryRequest):
         "answer": query_resp.answer,
         "agent_name": query_resp.agent_name,
         "success": query_resp.success,
-        "blocks": query_resp.blocks
+        "blocks": query_resp.blocks,
+        "status": query_resp.status,
+        "timestamp": query_resp.timestamp
     }
     query_resp_history.append(query_resp_dict)
 
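Taken together, these api.py hunks add "status" and "timestamp" to every payload the backend builds, so the web front end can show live progress instead of only the final answer. Below is a minimal polling sketch; it assumes the get_latest_answer route is exposed at /latest_answer on http://localhost:8000, which are assumptions of this example and not shown in the hunks above.

# Minimal polling sketch. Assumptions: route path "/latest_answer" and a
# server at http://localhost:8000; neither is shown in this diff.
import time
import requests

def poll_status(base_url: str = "http://localhost:8000", interval: float = 1.0) -> None:
    """Print the agent's status until it reports "Done"."""
    while True:
        data = requests.get(f"{base_url}/latest_answer", timeout=5).json()
        # "status" and "timestamp" are the fields added by this commit.
        print(f"[{data.get('timestamp')}] {data.get('agent_name')}: {data.get('status')}")
        if data.get("status") == "Done":
            print(data.get("answer"))
            break
        time.sleep(interval)
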
@@ -45,6 +45,7 @@ class Agent():
         self.tools = {}
         self.blocks_result = []
         self.last_answer = ""
+        self.status_message = "Haven't started yet"
         self.verbose = verbose
         self.executor = ThreadPoolExecutor(max_workers=1)
 
@@ -118,6 +119,7 @@ class Agent():
         """
         Asynchronously ask the LLM to process the prompt.
         """
+        self.status_message = "Thinking..."
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(self.executor, self.sync_llm_request)
 
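The status_message added to Agent.__init__ is only useful because llm_request off-loads the blocking LLM call to the single-worker ThreadPoolExecutor: the event loop stays free, so another coroutine (for example an API handler) can read the attribute mid-request. Here is a self-contained sketch of that pattern; StatusAgent and slow_llm_call are illustrative stand-ins, not repository code.

# Sketch of the run_in_executor pattern behind Agent.llm_request.
# StatusAgent and slow_llm_call are illustrative, not repo code.
import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

class StatusAgent:
    def __init__(self):
        self.status_message = "Haven't started yet"
        self.executor = ThreadPoolExecutor(max_workers=1)

    def slow_llm_call(self) -> str:
        time.sleep(2)  # stands in for the blocking LLM request
        return "answer"

    async def llm_request(self) -> str:
        self.status_message = "Thinking..."
        loop = asyncio.get_event_loop()
        # The blocking call runs in the worker thread; the event loop stays free.
        result = await loop.run_in_executor(self.executor, self.slow_llm_call)
        self.status_message = "Done"
        return result

async def main():
    agent = StatusAgent()
    task = asyncio.create_task(agent.llm_request())
    while not task.done():
        print("status:", agent.status_message)  # readable while the call blocks
        await asyncio.sleep(0.5)
    print("answer:", task.result())

asyncio.run(main())
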
@@ -308,6 +308,7 @@ class BrowserAgent(Agent):
             pretty_print(f"Web agent requested exit.\n{reasoning}\n\n{ai_prompt}", color="failure")
             return ai_prompt, ""
         animate_thinking(f"Searching...", color="status")
+        self.status_message = "Searching..."
         search_result_raw = self.tools["web_search"].execute([ai_prompt], False)
         search_result = self.jsonify_search_results(search_result_raw)[:16]
         self.show_search_results(search_result)
@@ -322,6 +323,7 @@ class BrowserAgent(Agent):
 
             extracted_form = self.extract_form(answer)
             if len(extracted_form) > 0:
+                self.status_message = "Filling web form..."
                 pretty_print(f"Filling inputs form...", color="status")
                 fill_success = self.browser.fill_form(extracted_form)
                 page_text = self.browser.get_text()
@@ -339,12 +341,14 @@ class BrowserAgent(Agent):
             link = self.select_link(links)
 
             if Action.REQUEST_EXIT.value in answer:
+                self.status_message = "Exiting web browser..."
                 pretty_print(f"Agent requested exit.", color="status")
                 complete = True
                 break
 
             if (link == None and not len(extracted_form)) or Action.GO_BACK.value in answer or link in self.search_history:
                 pretty_print(f"Going back to results. Still {len(unvisited)}", color="status")
+                self.status_message = "Going back to search results..."
                 unvisited = self.select_unvisited(search_result)
                 prompt = self.make_newsearch_prompt(user_prompt, unvisited)
                 continue
@@ -357,13 +361,16 @@ class BrowserAgent(Agent):
             page_text = self.browser.get_text()
             self.navigable_links = self.browser.get_navigable()
             prompt = self.make_navigation_prompt(user_prompt, page_text)
+            self.status_message = "Navigating..."
             self.browser.screenshot()
 
         pretty_print("Exited navigation, starting to summarize finding...", color="status")
         prompt = self.conclude_prompt(user_prompt)
         mem_last_idx = self.memory.push('user', prompt)
+        self.status_message = "Summarizing findings..."
         answer, reasoning = await self.llm_request()
         pretty_print(answer, color="output")
+        self.status_message = "Done"
         return answer, reasoning
 
 if __name__ == "__main__":
@@ -23,6 +23,7 @@ class CasualAgent(Agent):
         animate_thinking("Thinking...", color="status")
         answer, reasoning = await self.llm_request()
         self.last_answer = answer
+        self.status_message = "Done"
         return answer, reasoning
 
 if __name__ == "__main__":
@@ -57,6 +57,7 @@ class CoderAgent(Agent):
                 await asyncio.sleep(0)
                 break
             animate_thinking("Executing code...", color="status")
+            self.status_message = "Executing code..."
             exec_success, _ = self.execute_modules(answer)
             answer = self.remove_blocks(answer)
             self.last_answer = answer
@@ -65,8 +66,10 @@ class CoderAgent(Agent):
                 break
             pretty_print("Execution failure", color="failure")
             pretty_print("Correcting code...", color="status")
+            self.status_message = "Correcting code..."
             self.show_answer()
             attempt += 1
+        self.status_message = "Done"
         if attempt == max_attempts:
             return "I'm sorry, I couldn't find a solution to your problem. How would you like me to proceed ?", reasoning
         return answer, reasoning
@@ -30,6 +30,7 @@ class FileAgent(Agent):
         exec_success, _ = self.execute_modules(answer)
         answer = self.remove_blocks(answer)
         self.last_answer = answer
+        self.status_message = "Done"
         return answer, reasoning
 
 if __name__ == "__main__":
@@ -102,7 +102,7 @@ class PlannerAgent(Agent):
             ok = True
         return answer
 
-    async def start_agent_process(self, task: str, required_infos: dict | None) -> str:
+    async def start_agent_process(self, task: dict, required_infos: dict | None) -> str:
         agent_prompt = self.make_prompt(task['task'], required_infos)
         pretty_print(f"Agent {task['agent']} started working...", color="status")
         agent_answer, _ = await self.agents[task['agent'].lower()].process(agent_prompt, None)
@@ -123,6 +123,7 @@ class PlannerAgent(Agent):
         if agents_tasks == (None, None):
             return "Failed to parse the tasks.", ""
         for task_name, task in agents_tasks:
+            self.status_message = "Starting agent process..."
             pretty_print(f"I will {task_name}.", color="info")
             pretty_print(f"Assigned agent {task['agent']} to {task_name}", color="info")
             if speech_module: speech_module.speak(f"I will {task_name}. I assigned the {task['agent']} agent to the task.")
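The corrected type hint matches how start_agent_process is actually called: each entry of agents_tasks pairs a task name with a dict carrying at least the 'agent' and 'task' keys read in the hunks above. A hedged sketch of that shape follows; the concrete task names, agent names, and descriptions are made up for illustration.

# Illustrative only: the 'agent'/'task' keys come from the hunks above,
# the concrete values below are invented for the example.
from typing import Dict, List, Tuple

agents_tasks: List[Tuple[str, Dict[str, str]]] = [
    ("search the web for the library docs", {"agent": "Web", "task": "find the official documentation"}),
    ("write the script", {"agent": "Coder", "task": "write a Python script that uses the documented API"}),
]

for task_name, task in agents_tasks:
    # start_agent_process(task, ...) now correctly receives a dict, not a str
    print(f"Assigned agent {task['agent']} to {task_name}: {task['task']}")
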
@@ -22,6 +22,8 @@ class QueryResponse(BaseModel):
     agent_name: str
     success: str
     blocks: dict
+    status: str
+    timestamp: str
 
     def __str__(self):
         return f"Done: {self.done}, Answer: {self.answer}, Agent Name: {self.agent_name}, Success: {self.success}, Blocks: {self.blocks}"
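With this hunk the response model mirrors the dicts built in api.py, so the new fields are validated and serialized with the rest. A minimal sketch of the extended model is below; it assumes Pydantic v2 (use .dict() on v1) and assumes str types for the done and answer fields, whose declarations sit above this hunk.

# Sketch of the extended model; Pydantic v2 assumed (use .dict() on v1).
from pydantic import BaseModel

class QueryResponse(BaseModel):
    done: str        # declared above this hunk; str assumed here
    answer: str      # declared above this hunk; str assumed here
    agent_name: str
    success: str
    blocks: dict
    status: str      # added by this commit
    timestamp: str   # added by this commit

resp = QueryResponse(
    done="false",
    answer="Waiting for agent...",
    agent_name="Waiting for agent...",
    success="false",
    blocks={},
    status="Waiting for agent...",
    timestamp="1717550000.0",
)
print(resp.model_dump())  # includes "status" and "timestamp"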