add deep research to webui
@@ -5,6 +5,7 @@ from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
from browser_use.agent.views import ActionResult, ActionModel
from browser_use.browser.views import BrowserState
from langchain_core.messages import HumanMessage, SystemMessage
from datetime import datetime

from .custom_views import CustomAgentStepInfo

@@ -116,15 +117,11 @@ class CustomSystemPrompt(SystemPrompt):
Returns:
str: Formatted system prompt
"""
time_str = self.current_date.strftime("%Y-%m-%d %H:%M")

AGENT_PROMPT = f"""You are a precise browser automation agent that interacts with websites through structured commands. Your role is to:
1. Analyze the provided webpage elements and structure
2. Plan a sequence of actions to accomplish the given task
3. Your final result MUST be valid JSON in the **RESPONSE FORMAT** described, containing your action sequence and state assessment. No extra explanatory content is needed.

Current date and time: {time_str}

{self.input_format()}

{self.important_rules()}
@@ -160,6 +157,9 @@ class CustomAgentMessagePrompt(AgentMessagePrompt):
else:
step_info_description = ''

time_str = datetime.now().strftime("%Y-%m-%d %H:%M")
step_info_description += f"Current date and time: {time_str}"

elements_text = self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)

has_content_above = (self.state.pixels_above or 0) > 0

@@ -7,6 +7,7 @@ load_dotenv()
import asyncio
import os
import sys
import logging
from pprint import pprint
from uuid import uuid4
from src.utils import utils
@@ -17,171 +18,240 @@ from browser_use.browser.browser import BrowserConfig, Browser
from langchain.schema import SystemMessage, HumanMessage
from json_repair import repair_json
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
from src.controller.custom_controller import CustomController

logger = logging.getLogger(__name__)

async def deep_research(task, llm, **kwargs):

task_id = str(uuid4())
save_dir = kwargs.get("save_dir", os.path.join(f"./tmp/deep_research/{task_id}"))
logger.info(f"Save Deep Research at: {save_dir}")
os.makedirs(save_dir, exist_ok=True)

# Collected search information
search_infos = ""
# LLM history used for searching
max_query_num = 3
# max query num per iteration
max_query_num = kwargs.get("max_query_num", 3)
search_system_prompt = f"""
You are an expert task planner for an AI agent that uses a web browser with **automated execution capabilities**. Your goal is to analyze user instructions and, based on available information,
determine what further search queries are necessary to fulfill the user's request. You will output a JSON object with the following structure:
You are a **Deep Researcher**, an AI agent specializing in in-depth information gathering and research using a web browser with **automated execution capabilities**. Your expertise lies in formulating comprehensive research plans and executing them meticulously to fulfill complex user requests. You will analyze user instructions, devise a detailed research plan, and determine the necessary search queries to gather the required information.

[
**Your Task:**

Given a user's research topic, you will:

1. **Develop a Research Plan:** Outline the key aspects and subtopics that need to be investigated to thoroughly address the user's request. This plan should be a high-level overview of the research direction.
2. **Generate Search Queries:** Based on your research plan, generate a list of specific search queries to be executed in a web browser. These queries should be designed to efficiently gather relevant information for each aspect of your plan.

**Output Format:**

Your output will be a JSON object with the following structure:

```json
{{
"plan": "A concise, high-level research plan outlining the key areas to investigate.",
"queries": [
"search query 1",
"search query 2",
//... up to a maximum of {max_query_num} search queries
]
```

Here's an example of the type of `search` tasks we are expecting:
[
"weather in Tokyo",
"cheap flights to Paris"
]
}}
```

**Important:**

* Your output should *only* include search queries as strings in a JSON array. Do not include other task types like navigate, click, extract, etc.
* Limit your output to a **maximum of {max_query_num}** search queries.
* Craft the search queries so that they help the automated agent find the needed information. Consider what keywords are most likely to lead to useful results.
* If you have gathered all the information you want and no further search queries are required, output an empty list: `[]`
* Make sure your search queries are different from the previous queries.
* If you have gathered all the information you want and no further search queries are required, output queries as an empty list: `[]`
* Make sure the output search queries are different from the history queries.

**Inputs:**

1. **User Instruction:** The original instruction given by the user.
2. **Previous Search Results:** Textual data gathered from prior search queries. If there are no previous search results this string will be empty.
2. **Previous Queries:** The history of previously issued queries.
3. **Previous Search Results:** Textual data gathered from prior search queries. If there are no previous search results this string will be empty.
"""
search_messages = [SystemMessage(content=search_system_prompt)]
# Record and summarize the history information, saved to raw_infos

record_system_prompt = """
You are an expert information recorder. Your role is to process user instructions, current search results, and previously recorded information to extract, summarize, and record new, useful information that helps fulfill the user's request. Your output will be a concise textual summary of new information.
You are an expert information recorder. Your role is to process user instructions, current search results, and previously recorded information to extract, summarize, and record new, useful information that helps fulfill the user's request. Your output will be a JSON formatted list, where each element represents a piece of extracted information and follows the structure: `{"url": "source_url", "title": "source_title", "summary_content": "concise_summary", "thinking": "reasoning"}`.

**Important Considerations:**

1. **Avoid Redundancy:** Do not record information that is already present in the `Previous Recorded Information`. Check for semantic similarity, not just exact matches.
1. **Minimize Information Loss:** While concise, prioritize retaining important details and nuances from the sources. Aim for a summary that captures the essence of the information without over-simplification. **Crucially, ensure that key data and figures are preserved within the `summary_content`. This is essential for later stages, such as generating tables and reports.**

2. **Utility Focus:** Only record information that is likely to be useful for completing the user's original instruction. Ask yourself: "Will this help the AI agent achieve its goal?" Discard irrelevant details.
2. **Avoid Redundancy:** Do not record information that is already present in the Previous Recorded Information. Check for semantic similarity, not just exact matches. However, if the same information is expressed differently in a new source and this variation adds valuable context or clarity, it should be included.

3. **Include Source Information:** When summarizing information extracted from a specific source (like a webpage or article), always include the source title and URL if available. This helps in verifying the information and providing context.
3. **Source Information:** Extract and include the source title and URL for each piece of information summarized. This is crucial for verification and context. **The Current Search Results are provided in a specific format, where each item starts with "Title:", followed by the title, then "URL Source:", followed by the URL, and finally "Markdown Content:", followed by the content. Please extract the title and URL from this structure.** If a piece of information cannot be attributed to a specific source from the provided search results, use `"url": "unknown"` and `"title": "unknown"`.

4. **Format:** Provide your output as a textual summary. When source information is available, use the format: `[title](url): summarized content`. If no specific source is identified, just provide the concise summary. No JSON or other structured output is needed beyond this format.
4. **Thinking and Report Structure:** For each extracted piece of information, add a `"thinking"` key. This field should contain your assessment of how this information could be used in a report, which section it might belong to (e.g., introduction, background, analysis, conclusion, specific subtopics), and any other relevant thoughts about its significance or connection to other information.

**Output Format:**

Provide your output as a JSON formatted list. Each item in the list must adhere to the following format:

```json
[
{
"url": "source_url_1",
"title": "source_title_1",
"summary_content": "Concise summary of content. Remember to include key data and figures here.",
"thinking": "This could be used in the introduction to set the context. It also relates to the section on the history of the topic."
},
// ... more entries
{
"url": "unknown",
"title": "unknown",
"summary_content": "concise_summary_of_content_without_clear_source",
"thinking": "This might be useful background information, but I need to verify its accuracy. Could be used in the methodology section to explain how data was collected."
}
]
```

**Inputs:**

1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful.
2. **Current Search Results:** Textual data gathered from the most recent search query.
3. **Previous Recorded Information:** Textual data gathered and recorded from previous searches and processing, represented as a single text string. This string might be empty if no information has been recorded yet.
1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
2. **Previous Recorded Information:** Textual data gathered and recorded from previous searches and processing, represented as a single text string.
3. **Current Search Results:** Textual data gathered from the most recent search query.
"""
record_messages = [SystemMessage(content=record_system_prompt)]

browser = Browser(
config=BrowserConfig(
disable_security=True,
headless=False, # Set to False to see browser actions
headless=kwargs.get("headless", False), # Set to False to see browser actions
)
)
search_iteration = 0
max_search_iterations = 5 # Limit search iterations to prevent infinite loop
max_history_len = 2
use_vision = True
controller = CustomController()

search_iteration = 0
max_search_iterations = kwargs.get("max_search_iterations", 10) # Limit search iterations to prevent infinite loop
use_vision = kwargs.get("use_vision", False)

history_query = []
history_infos = []
try:
while search_iteration < max_search_iterations:
search_iteration += 1
print(f"Starting search round {search_iteration}...")

query_prompt = f"User Instruction:{task} \n Previous Search Results:\n {search_infos}"
logger.info(f"Start {search_iteration}th Search...")
history_query_ = json.dumps(history_query, indent=4)
history_infos_ = json.dumps(history_infos, indent=4)
query_prompt = f"This is search {search_iteration} of {max_search_iterations} maximum searches allowed.\n User Instruction:{task} \n Previous Queries:\n {history_query_} \n Previous Search Results:\n {history_infos_}\n"
search_messages.append(HumanMessage(content=query_prompt))
ai_query_msg = llm.invoke(search_messages[:1] + search_messages[1:][-max_history_len:])
ai_query_msg = llm.invoke(search_messages[:1] + search_messages[1:][-1:])
search_messages.append(ai_query_msg)
if hasattr(ai_query_msg, "reasoning_content"):
print("🤯 Start Search Deep Thinking: ")
print(ai_query_msg.reasoning_content)
print("🤯 End Search Deep Thinking")
ai_content = ai_query_msg.content.replace("```json", "").replace("```", "")
ai_content = repair_json(ai_content)
query_tasks = json.loads(ai_content)
logger.info("🤯 Start Search Deep Thinking: ")
logger.info(ai_query_msg.reasoning_content)
logger.info("🤯 End Search Deep Thinking")
ai_query_content = ai_query_msg.content.replace("```json", "").replace("```", "")
ai_query_content = repair_json(ai_query_content)
ai_query_content = json.loads(ai_query_content)
query_plan = ai_query_content["plan"]
logger.info(f"Current Iteration {search_iteration} Planning:")
logger.info(query_plan)
query_tasks = ai_query_content["queries"]
if not query_tasks:
break
else:
search_messages.append(ai_query_msg)
print(f"Search keywords/questions: {query_tasks}")
history_query.extend(query_tasks)
logger.info("Query tasks:")
logger.info(query_tasks)

# 2. Perform Web Search and Auto exec
agents = [CustomAgent(task=task + ". Please click on the most relevant link to get information and go deeper, instead of just staying on the search page.",
llm=llm_bu,
# Parallel BU agents
agents = [CustomAgent(
task=task + ". Please click on the most relevant link to get information and go deeper, instead of just staying on the search page.",
llm=llm,
browser=browser,
use_vision=use_vision,
system_prompt_class=CustomSystemPrompt,
agent_prompt_class=CustomAgentMessagePrompt,
max_actions_per_step=5
max_actions_per_step=5,
controller=controller
) for task in query_tasks]
query_results = await asyncio.gather(*[agent.run(max_steps=10) for agent in agents])
query_results = await asyncio.gather(*[agent.run(max_steps=kwargs.get("max_steps", 10)) for agent in agents])

# 3. Summarize Search Result
cur_search_rets = ""
query_result_dir = os.path.join(save_dir, "query_results")
os.makedirs(query_result_dir, exist_ok=True)
for i in range(len(query_tasks)):
cur_search_rets += f"{i+1}. {query_tasks[i]}\n {query_results[i].final_result()}\n"
record_prompt = f"User Instruction:{task}. \n Current Search Results: {cur_search_rets}\n Previous Search Results:\n {search_infos}"
query_result = query_results[i].final_result()
querr_save_path = os.path.join(query_result_dir, f"{search_iteration}-{i}.md")
logger.info(f"save query: {query_tasks[i]} at {querr_save_path}")
with open(querr_save_path, "w", encoding="utf-8") as fw:
fw.write(f"Query: {query_tasks[i]}\n")
fw.write(query_result)
history_infos_ = json.dumps(history_infos, indent=4)
record_prompt = f"User Instruction:{task}. \nPrevious Recorded Information:\n {json.dumps(history_infos_)} \n Current Search Results: {query_result}\n "
record_messages.append(HumanMessage(content=record_prompt))
ai_record_msg = llm.invoke(record_messages[:1] + record_messages[-1:])
if hasattr(ai_record_msg, "reasoning_content"):
print("🤯 Start Record Deep Thinking: ")
print(ai_record_msg.reasoning_content)
print("🤯 End Record Deep Thinking")
record_content = ai_record_msg.content
search_infos += record_content + "\n"
record_messages.append(ai_record_msg)
print(search_infos)
if hasattr(ai_record_msg, "reasoning_content"):
logger.info("🤯 Start Record Deep Thinking: ")
logger.info(ai_record_msg.reasoning_content)
logger.info("🤯 End Record Deep Thinking")
record_content = ai_record_msg.content
record_content = repair_json(record_content)
new_record_infos = json.loads(record_content)
history_infos.extend(new_record_infos)

print("\n搜索完成, 开始生成报告...")
|
||||
logger.info("\nFinish Searching, Start Generating Report...")
|
||||
|
||||
# 5. Report Generation in Markdown (or JSON if you prefer)
|
||||
writer_system_prompt = """
create polished, high-quality reports that fully meet the user's needs, based on the user's instructions and the relevant information provided. Please write the report using Markdown format, ensuring it is both informative and visually appealing.
You are a **Deep Researcher** and a professional report writer tasked with creating polished, high-quality reports that fully meet the user's needs, based on the user's instructions and the relevant information provided. You will write the report using Markdown format, ensuring it is both informative and visually appealing.

**Specific Instructions:**

Specific Instructions:
* **Structure for Impact:** The report must have a clear, logical, and impactful structure. Begin with a compelling introduction that immediately grabs the reader's attention. Develop well-structured body paragraphs that flow smoothly and logically, and conclude with a concise and memorable conclusion that summarizes key takeaways and leaves a lasting impression.
* **Engaging and Vivid Language:** Employ precise, vivid, and descriptive language to make the report captivating and enjoyable to read. Use stylistic techniques to enhance engagement. Tailor your tone, vocabulary, and writing style to perfectly suit the subject matter and the intended audience to maximize impact and readability.
* **Accuracy and Credibility:** Ensure that all information presented is meticulously accurate, rigorously truthful, and robustly supported by the available data. Cite sources professionally and appropriately to enhance credibility and allow for verification.
* **Accuracy, Credibility, and Citations:** Ensure that all information presented is meticulously accurate, rigorously truthful, and robustly supported by the available data. **Cite sources exclusively using bracketed sequential numbers within the text (e.g., [1], [2], etc.). If no references are used, omit citations entirely.** These numbers must correspond to a numbered list of references at the end of the report.
* **Publication-Ready Formatting:** Adhere strictly to Markdown formatting for excellent readability and a clean, highly professional visual appearance. Pay close attention to formatting details like headings, lists, emphasis, and spacing to optimize the visual presentation and reader experience. The report should be ready for immediate publication upon completion, requiring minimal to no further editing for style or format.
* **Conciseness and Clarity (Unless Specified Otherwise):** When the user does not provide a specific length, prioritize concise and to-the-point writing, maximizing information density while maintaining clarity.
* **Data-Driven Comparisons with Tables:** **When appropriate and beneficial for enhancing clarity and impact, present data comparisons in well-structured Markdown tables. This is especially encouraged when dealing with numerical data or when a visual comparison can significantly improve the reader's understanding.**
* **Length Adherence:** When the user specifies a length constraint, meticulously stay within reasonable bounds of that specification, ensuring the content is appropriately scaled without sacrificing quality or completeness.
* **Comprehensive Instruction Following:** Pay meticulous attention to all details and nuances provided in the user instructions. Strive to fulfill every aspect of the user's request with the highest degree of accuracy and attention to detail, creating a report that not only meets but exceeds expectations for quality and professionalism.
* **Output Final Report Only Instruction:** This new instruction is explicitly added at the end to directly address the user's requirement. It clearly commands the LLM to output *only* the final article and to avoid any other elements. The bolded emphasis further reinforces this crucial point.
* **Reference List Formatting:** The reference list at the end must be formatted as follows:
`[1] Title (URL, if available)`
**Each reference must be separated by a blank line to ensure proper spacing.** For example:

```
[1] Title 1 (URL1, if available)

[2] Title 2 (URL2, if available)
```
**Furthermore, ensure that the reference list is free of duplicates. Each unique source should be listed only once, regardless of how many times it is cited in the text.**
* **ABSOLUTE FINAL OUTPUT RESTRICTION:** **Your output must contain ONLY the finished, publication-ready Markdown report. Do not include ANY extraneous text, phrases, preambles, meta-commentary, or markdown code indicators (e.g., "```markdown```"). The report should begin directly with the title and introductory paragraph, and end directly after the conclusion and the reference list (if applicable).** **Your response will be deemed a failure if this instruction is not followed precisely.**

**Inputs:**

1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
2. **Search Information:** Information gathered from the search queries.
"""
report_prompt = f"User Instruction:{task} \n Search Information:\n {search_infos}"
report_messages = [SystemMessage(content=writer_system_prompt), HumanMessage(content=report_prompt)] # New context for report generation

history_infos_ = json.dumps(history_infos, indent=4)
record_json_path = os.path.join(save_dir, "record_infos.json")
logger.info(f"save All recorded information at {record_json_path}")
with open(record_json_path, "w") as fw:
json.dump(history_infos, fw, indent=4)
report_prompt = f"User Instruction:{task} \n Search Information:\n {history_infos_}"
report_messages = [SystemMessage(content=writer_system_prompt),
HumanMessage(content=report_prompt)] # New context for report generation
ai_report_msg = llm.invoke(report_messages)
if hasattr(ai_report_msg, "reasoning_content"):
print("🤯 Start Report Deep Thinking: ")
print(ai_report_msg.reasoning_content)
print("🤯 End Report Deep Thinking")
logger.info("🤯 Start Report Deep Thinking: ")
logger.info(ai_report_msg.reasoning_content)
logger.info("🤯 End Report Deep Thinking")
report_content = ai_report_msg.content

if report_content:
report_file_path = os.path.join(save_dir, "result.md")
report_file_path = os.path.join(save_dir, "final_report.md")
with open(report_file_path, "w", encoding="utf-8") as f:
f.write(report_content)
print(f"Report generated and saved to: {report_file_path}")

print("\nFinal Result: (Report Content)")
pprint(report_content, indent=4) # Print the final report content

else:
print("Failed to generate report content.")

logger.info(f"Save Report at: {report_file_path}")
return report_content, report_file_path

except Exception as e:
print(f"An error occurred during deep research: {e}")
logger.error(f"Deep research Error: {e}")
return "", None
finally:
if browser:
await browser.close()
print("Browser closed.")
logger.info("Browser closed.")
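Below is a minimal sketch (not part of the commit) of driving the new `deep_research` coroutine directly, mirroring the deleted standalone script further down; the provider, model name, task text, and kwarg values are illustrative assumptions.

```python
import asyncio

from src.utils import utils
from src.utils.deep_research import deep_research


async def main():
    # Assumed provider/model; any entry from the model_names map in the next hunk should work.
    llm = utils.get_llm_model(provider="gemini",
                              model_name="gemini-2.0-flash-thinking-exp-01-21",
                              temperature=0.7)
    report_md, report_path = await deep_research(
        "Summarize recent advances in browser automation agents",  # assumed research task
        llm,
        max_search_iterations=3,  # loop bound, same kwarg the webui wrapper forwards
        max_query_num=3,          # queries generated per iteration
        use_vision=False,
        headless=True,
    )
    print(f"Report saved at: {report_path}")


if __name__ == "__main__":
    asyncio.run(main())
```

The same kwargs (`max_search_iterations`, `max_query_num`, `use_vision`, `headless`) are what the new `run_deep_search` wrapper in webui.py forwards from the Gradio controls.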
@@ -143,7 +143,7 @@ model_names = {
"anthropic": ["claude-3-5-sonnet-20240620", "claude-3-opus-20240229"],
"openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o3-mini"],
"deepseek": ["deepseek-chat", "deepseek-reasoner"],
"gemini": ["gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest", "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-1219" ],
"gemini": ["gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest", "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21"],
"ollama": ["qwen2.5:7b", "llama2:7b", "deepseek-r1:14b", "deepseek-r1:32b"],
"azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
"mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"]

@@ -1,265 +0,0 @@
import pdb

from dotenv import load_dotenv

load_dotenv()
import sys

sys.path.append(".")
import asyncio
import os
import sys
from pprint import pprint
from uuid import uuid4
from src.utils import utils
from src.agent.custom_agent import CustomAgent
import json
from browser_use.agent.service import Agent
from browser_use.browser.browser import BrowserConfig, Browser
from langchain.schema import SystemMessage, HumanMessage
from json_repair import repair_json
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
from src.controller.custom_controller import CustomController


async def deep_research():
# define task
task = "中文写一篇关于2025年股票投资建议,从各个方面进行论述,2000字以上"
task_id = uuid4().__str__()
save_dir = os.path.join(f"./tmp/deep_research/{task_id}")
os.makedirs(save_dir, exist_ok=True)

llm = utils.get_llm_model(provider="gemini", model_name="gemini-2.0-flash-thinking-exp-01-21", temperature=0.7)
# llm = utils.get_llm_model(provider="deepseek", model_name="deepseek-reasoner", temperature=0.7)
llm_bu = utils.get_llm_model(provider="azure_openai", model_name="gpt-4o", temperature=0.7)

# LLM history used for searching
max_query_num = 3
search_system_prompt = """
You are a **Deep Researcher**, an AI agent specializing in in-depth information gathering and research using a web browser with **automated execution capabilities**. Your expertise lies in formulating comprehensive research plans and executing them meticulously to fulfill complex user requests. You will analyze user instructions, devise a detailed research plan, and determine the necessary search queries to gather the required information.

**Your Task:**

Given a user's research topic, you will:

1. **Develop a Research Plan:** Outline the key aspects and subtopics that need to be investigated to thoroughly address the user's request. This plan should be a high-level overview of the research direction.
2. **Generate Search Queries:** Based on your research plan, generate a list of specific search queries to be executed in a web browser. These queries should be designed to efficiently gather relevant information for each aspect of your plan.

**Output Format:**

Your output will be a JSON object with the following structure:

```json
{
"plan": "A concise, high-level research plan outlining the key areas to investigate.",
"queries": [
"search query 1",
"search query 2",
//... up to a maximum of 3 search queries
]
}
```

**Important:**

* Limit your output to a **maximum of 3** search queries.
* Craft the search queries so that they help the automated agent find the needed information. Consider what keywords are most likely to lead to useful results.
* If you have gathered all the information you want and no further search queries are required, output queries as an empty list: `[]`
* Make sure the output search queries are different from the history queries.

**Inputs:**

1. **User Instruction:** The original instruction given by the user.
2. **Previous Queries:** The history of previously issued queries.
3. **Previous Search Results:** Textual data gathered from prior search queries. If there are no previous search results this string will be empty.
"""
search_messages = [SystemMessage(content=search_system_prompt)]

# Record and summarize the history information, saved to raw_infos
record_system_prompt = """
You are an expert information recorder. Your role is to process user instructions, current search results, and previously recorded information to extract, summarize, and record new, useful information that helps fulfill the user's request. Your output will be a JSON formatted list, where each element represents a piece of extracted information and follows the structure: `{"url": "source_url", "title": "source_title", "summary_content": "concise_summary", "thinking": "reasoning"}`.

**Important Considerations:**

1. **Minimize Information Loss:** While concise, prioritize retaining important details and nuances from the sources. Aim for a summary that captures the essence of the information without over-simplification. **Crucially, ensure that key data and figures are preserved within the `summary_content`. This is essential for later stages, such as generating tables and reports.**

2. **Avoid Redundancy:** Do not record information that is already present in the Previous Recorded Information. Check for semantic similarity, not just exact matches. However, if the same information is expressed differently in a new source and this variation adds valuable context or clarity, it should be included.

3. **Source Information:** Extract and include the source title and URL for each piece of information summarized. This is crucial for verification and context. **The Current Search Results are provided in a specific format, where each item starts with "Title:", followed by the title, then "URL Source:", followed by the URL, and finally "Markdown Content:", followed by the content. Please extract the title and URL from this structure.** If a piece of information cannot be attributed to a specific source from the provided search results, use `"url": "unknown"` and `"title": "unknown"`.

4. **Thinking and Report Structure:** For each extracted piece of information, add a `"thinking"` key. This field should contain your assessment of how this information could be used in a report, which section it might belong to (e.g., introduction, background, analysis, conclusion, specific subtopics), and any other relevant thoughts about its significance or connection to other information.

**Output Format:**

Provide your output as a JSON formatted list. Each item in the list must adhere to the following format:

```json
[
{
"url": "source_url_1",
"title": "source_title_1",
"summary_content": "Concise summary of content. Remember to include key data and figures here.",
"thinking": "This could be used in the introduction to set the context. It also relates to the section on the history of the topic."
},
// ... more entries
{
"url": "unknown",
"title": "unknown",
"summary_content": "concise_summary_of_content_without_clear_source",
"thinking": "This might be useful background information, but I need to verify its accuracy. Could be used in the methodology section to explain how data was collected."
}
]
```

**Inputs:**

1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
2. **Previous Recorded Information:** Textual data gathered and recorded from previous searches and processing, represented as a single text string.
3. **Current Search Results:** Textual data gathered from the most recent search query.
"""
record_messages = [SystemMessage(content=record_system_prompt)]

browser = Browser(
config=BrowserConfig(
disable_security=True,
headless=False, # Set to False to see browser actions
)
)
controller = CustomController()

search_iteration = 0
max_search_iterations = 4 # Limit search iterations to prevent infinite loop
use_vision = False

history_query = []
history_infos = []
try:
while search_iteration < max_search_iterations:
search_iteration += 1
print(f"Start {search_iteration}th Search...")
history_queries = ""
for i in range(len(history_query)):
history_queries += f"{i + 1}. {history_query[i]}\n"
history_infos_ = json.dumps(history_infos, indent=4)
query_prompt = f"User Instruction:{task} \n Previous Queries: {history_queries} \n Previous Search Results:\n {history_infos_}"
search_messages.append(HumanMessage(content=query_prompt))
ai_query_msg = llm.invoke(search_messages[:1] + search_messages[1:][-1:])
if hasattr(ai_query_msg, "reasoning_content"):
print("🤯 Start Search Deep Thinking: ")
print(ai_query_msg.reasoning_content)
print("🤯 End Search Deep Thinking")
ai_query_content = ai_query_msg.content.replace("```json", "").replace("```", "")
ai_query_content = repair_json(ai_query_content)
ai_query_content = json.loads(ai_query_content)
query_plan = ai_query_content["plan"]
print("Current Planning:")
print(query_plan)
query_tasks = ai_query_content["queries"]
if not query_tasks:
break
else:
history_query.extend(query_tasks)
print("Query tasks:")
print(query_tasks)
search_messages.append(ai_query_msg)

# 2. Perform Web Search and Auto exec
agents = [CustomAgent(
task=task + ". Please click on the most relevant link to get information and go deeper, instead of just staying on the search page.",
llm=llm_bu,
browser=browser,
use_vision=use_vision,
system_prompt_class=CustomSystemPrompt,
agent_prompt_class=CustomAgentMessagePrompt,
max_actions_per_step=5,
controller=controller
) for task in query_tasks]
query_results = await asyncio.gather(*[agent.run(max_steps=5) for agent in agents])

# 3. Summarize Search Result
query_result_dir = os.path.join(save_dir, "query_results")
os.makedirs(query_result_dir, exist_ok=True)
for i in range(len(query_tasks)):
query_result = query_results[i].final_result()
with open(os.path.join(query_result_dir, f"{search_iteration}-{i}.md"), "w", encoding="utf-8") as fw:
fw.write(f"Query: {query_tasks[i]}\n")
fw.write(query_result)
history_infos_ = json.dumps(history_infos, indent=4)
record_prompt = f"User Instruction:{task}. \nPrevious Recorded Information:\n {json.dumps(history_infos_)} \n Current Search Results: {query_result}\n "
record_messages.append(HumanMessage(content=record_prompt))
ai_record_msg = llm.invoke(record_messages[:1] + record_messages[-1:])
if hasattr(ai_record_msg, "reasoning_content"):
print("🤯 Start Record Deep Thinking: ")
print(ai_record_msg.reasoning_content)
print("🤯 End Record Deep Thinking")
record_content = ai_record_msg.content
record_content = repair_json(record_content)
new_record_infos = json.loads(record_content)
history_infos.extend(new_record_infos)
record_messages.append(ai_record_msg)

print("\nFinish Searching, Start Generating Report...")

# 5. Report Generation in Markdown (or JSON if you prefer)
writer_system_prompt = """
You are a professional report writer tasked with creating polished, high-quality reports that fully meet the user's needs, based on the user's instructions and the relevant information provided. You will write the report using Markdown format, ensuring it is both informative and visually appealing.

**Specific Instructions:**

* **Structure for Impact:** The report must have a clear, logical, and impactful structure. Begin with a compelling introduction that immediately grabs the reader's attention. Develop well-structured body paragraphs that flow smoothly and logically, and conclude with a concise and memorable conclusion that summarizes key takeaways and leaves a lasting impression.
* **Engaging and Vivid Language:** Employ precise, vivid, and descriptive language to make the report captivating and enjoyable to read. Use stylistic techniques to enhance engagement. Tailor your tone, vocabulary, and writing style to perfectly suit the subject matter and the intended audience to maximize impact and readability.
* **Accuracy, Credibility, and Citations:** Ensure that all information presented is meticulously accurate, rigorously truthful, and robustly supported by the available data. **Cite sources exclusively using bracketed sequential numbers within the text (e.g., [1], [2], etc.). If no references are used, omit citations entirely.** These numbers must correspond to a numbered list of references at the end of the report.
* **Publication-Ready Formatting:** Adhere strictly to Markdown formatting for excellent readability and a clean, highly professional visual appearance. Pay close attention to formatting details like headings, lists, emphasis, and spacing to optimize the visual presentation and reader experience. The report should be ready for immediate publication upon completion, requiring minimal to no further editing for style or format.
* **Conciseness and Clarity (Unless Specified Otherwise):** When the user does not provide a specific length, prioritize concise and to-the-point writing, maximizing information density while maintaining clarity.
* **Data-Driven Comparisons with Tables:** **When appropriate and beneficial for enhancing clarity and impact, present data comparisons in well-structured Markdown tables. This is especially encouraged when dealing with numerical data or when a visual comparison can significantly improve the reader's understanding.**
* **Length Adherence:** When the user specifies a length constraint, meticulously stay within reasonable bounds of that specification, ensuring the content is appropriately scaled without sacrificing quality or completeness.
* **Comprehensive Instruction Following:** Pay meticulous attention to all details and nuances provided in the user instructions. Strive to fulfill every aspect of the user's request with the highest degree of accuracy and attention to detail, creating a report that not only meets but exceeds expectations for quality and professionalism.
* **Reference List Formatting:** The reference list at the end must be formatted as follows:
`[1] Title (URL, if available)`
**Each reference must be separated by a blank line to ensure proper spacing.** For example:

```
[1] Title 1 (URL1, if available)

[2] Title 2 (URL2, if available)
```
**Furthermore, ensure that the reference list is free of duplicates. Each unique source should be listed only once, regardless of how many times it is cited in the text.**
* **ABSOLUTE FINAL OUTPUT RESTRICTION:** **Your output must contain ONLY the finished, publication-ready Markdown report. Do not include ANY extraneous text, phrases, preambles, meta-commentary, or markdown code indicators (e.g., "```markdown```"). The report should begin directly with the title and introductory paragraph, and end directly after the conclusion and the reference list (if applicable).** **Your response will be deemed a failure if this instruction is not followed precisely.**

**Inputs:**

1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
2. **Search Information:** Information gathered from the recent search queries.
"""
with open(os.path.join(save_dir, "record_infos.json"), "w") as fw:
json.dump(history_infos, fw, indent=4)
history_infos_ = json.dumps(history_infos, indent=4)
report_prompt = f"User Instruction:{task} \n Search Information:\n {history_infos_}"
report_messages = [SystemMessage(content=writer_system_prompt),
HumanMessage(content=report_prompt)] # New context for report generation
ai_report_msg = llm.invoke(report_messages)
if hasattr(ai_report_msg, "reasoning_content"):
print("🤯 Start Report Deep Thinking: ")
print(ai_report_msg.reasoning_content)
print("🤯 End Report Deep Thinking")
report_content = ai_report_msg.content

if report_content:
report_file_path = os.path.join(save_dir, "final_report.md")
with open(report_file_path, "w", encoding="utf-8") as f:
f.write(report_content)
print(f"Report generated and saved to: {report_file_path}")

else:
print("Failed to generate report content.")

except Exception as e:
print(f"An error occurred during deep research: {e}")
finally:
if browser:
await browser.close()
print("Browser closed.")


if __name__ == "__main__":
asyncio.run(deep_research())
webui.py
@@ -598,6 +598,24 @@ async def close_global_browser():
await _global_browser.close()
_global_browser = None

async def run_deep_search(research_task, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, headless):
from src.utils.deep_research import deep_research

llm = utils.get_llm_model(
provider=llm_provider,
model_name=llm_model_name,
temperature=llm_temperature,
base_url=llm_base_url,
api_key=llm_api_key,
)
markdown_content, file_path = await deep_research(research_task, llm,
max_search_iterations=max_search_iteration_input,
max_query_num=max_query_per_iter_input,
use_vision=use_vision,
headless=headless)
return markdown_content, file_path


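For completeness, a hedged sketch of exercising the new `run_deep_search` wrapper outside the Gradio UI; the import path, provider key, model name, and empty base URL/API key are assumptions and may need adjusting to the actual environment.

```python
import asyncio

# Assumes this wrapper is importable from webui.py; the keyword arguments
# match the research_button.click() inputs wired further below.
from webui import run_deep_search


async def smoke_test():
    report_md, report_file = await run_deep_search(
        research_task="Short survey of RL techniques for LLM training",  # assumed task
        max_search_iteration_input=2,
        max_query_per_iter_input=3,
        llm_provider="openai",   # assumed provider key from model_names
        llm_model_name="gpt-4o",
        llm_temperature=0.7,
        llm_base_url="",          # assumed: empty falls back to the provider default
        llm_api_key="",           # assumed: empty falls back to environment variables
        use_vision=False,
        headless=True,
    )
    print(report_file)


if __name__ == "__main__":
    asyncio.run(smoke_test())
```

The parameters line up one-to-one with the `inputs=[...]` list passed to `research_button.click` in the UI wiring below.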
def create_ui(config, theme_name="Ocean"):
css = """
.gradio-container {
@@ -797,6 +815,17 @@ def create_ui(config, theme_name="Ocean"):
label="Live Browser View",
|
||||
)
|
||||
|
||||
with gr.TabItem("🧐 Deep Research"):
|
||||
with gr.Group():
|
||||
research_task_input = gr.Textbox(label="Research Task", lines=5, value="Compose a report on the use of Reinforcement Learning for training Large Language Models, encompassing its origins, current advancements, and future prospects, substantiated with examples of relevant models and techniques. The report should reflect original insights and analysis, moving beyond mere summarization of existing literature.")
|
||||
with gr.Row():
|
||||
max_search_iteration_input = gr.Number(label="Max Search Iteration", value=20, precision=0) # precision=0 确保是整数
|
||||
max_query_per_iter_input = gr.Number(label="Max Query per Iteration", value=5, precision=0) # precision=0 确保是整数
|
||||
research_button = gr.Button("Run Deep Research")
|
||||
markdown_output_display = gr.Markdown(label="Research Report")
|
||||
markdown_download = gr.File(label="Download Research Report")
|
||||
|
||||
|
||||
with gr.TabItem("📁 Configuration", id=5):
|
||||
with gr.Group():
|
||||
config_file_input = gr.File(
|
||||
@@ -897,6 +926,13 @@ def create_ui(config, theme_name="Ocean"):
|
||||
],
|
||||
)
|
||||

# Run Deep Research
research_button.click(
fn=run_deep_search,
inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, headless],
outputs=[markdown_output_display, markdown_download]
)

with gr.TabItem("🎥 Recordings", id=7):
def list_recordings(save_recording_path):
if not os.path.exists(save_recording_path):
