Merge pull request #14 from HKUDS/autoagent

Autoagent
This commit is contained in:
Jiabin Tang
2025-02-16 22:48:54 +08:00
committed by GitHub
98 changed files with 805 additions and 1143 deletions

View File

@@ -3,7 +3,7 @@ from .types import Agent, Response
# from .workflow import Graph, meta_workflow, FlowEngine
from .flow import default_drive
import metachain.workflows
import metachain.tools
import metachain.agents
import autoagent.workflows
import autoagent.tools
import autoagent.agents
__all__ = ["MetaChain", "Agent", "Response", "default_drive", ]

View File

@@ -1,21 +1,21 @@
# from metachain.agents.programming_agent import get_programming_agent
# from metachain.agents.tool_retriver_agent import get_tool_retriver_agent
# from metachain.agents.agent_check_agent import get_agent_check_agent
# from metachain.agents.tool_check_agent import get_tool_check_agent
# from metachain.agents.github_agent import get_github_agent
# from metachain.agents.programming_triage_agent import get_programming_triage_agent
# from metachain.agents.plan_agent import get_plan_agent
# from autoagent.agents.programming_agent import get_programming_agent
# from autoagent.agents.tool_retriver_agent import get_tool_retriver_agent
# from autoagent.agents.agent_check_agent import get_agent_check_agent
# from autoagent.agents.tool_check_agent import get_tool_check_agent
# from autoagent.agents.github_agent import get_github_agent
# from autoagent.agents.programming_triage_agent import get_programming_triage_agent
# from autoagent.agents.plan_agent import get_plan_agent
# import os
# import importlib
# from metachain.registry import registry
# from autoagent.registry import registry
# # 获取当前目录下的所有 .py 文件
# current_dir = os.path.dirname(__file__)
# for file in os.listdir(current_dir):
# if file.endswith('.py') and not file.startswith('__'):
# module_name = file[:-3]
# importlib.import_module(f'metachain.agents.{module_name}')
# importlib.import_module(f'autoagent.agents.{module_name}')
# # 导出所有注册的 agent 创建函数
# globals().update(registry.agents)
@@ -24,7 +24,7 @@
import os
import importlib
from metachain.registry import registry
from autoagent.registry import registry
def import_agents_recursively(base_dir: str, base_package: str):
"""Recursively import all agents in .py files
@@ -55,7 +55,7 @@ def import_agents_recursively(base_dir: str, base_package: str):
# get the current directory and import all agents
current_dir = os.path.dirname(__file__)
import_agents_recursively(current_dir, 'metachain.agents')
import_agents_recursively(current_dir, 'autoagent.agents')
# export all agent creation functions
globals().update(registry.agents)

View File

@@ -1,12 +1,12 @@
from metachain.types import Agent
from metachain.tools import tool_dummy
from autoagent.types import Agent
from autoagent.tools import tool_dummy
from typing import Union
from metachain.registry import register_plugin_agent # import the register_agent function from the registry
from autoagent.registry import register_plugin_agent # import the register_agent function from the registry
@register_plugin_agent(name = "Dummy Agent", func_name="get_dummy_agent") # You must register the agent in the registry, otherwise the agent will not be loaded. The name of register_agent is get_xxx_agent.
def get_dummy_agent(model: str):
"""
This is a dummy agent, it's used for demonstrating the usage of the metachain.
This is a dummy agent, it's used for demonstrating the usage of the autoagent.
Args:
model: The model to be used for the agent.
Returns:
@@ -30,7 +30,7 @@ def get_dummy_agent(model: str):
Form to create an agent:
agent_name = "Dummy Agent"
agent_description = "This is a dummy agent, it's used for demonstrating the usage of the metachain."
agent_description = "This is a dummy agent, it's used for demonstrating the usage of the autoagent."
agent_instructions = "..." | "...{global_variables}..."
agent_tools = [tool_dummy]
"""

View File

@@ -1,15 +1,15 @@
from metachain.types import Agent
from metachain.tools import (
from autoagent.types import Agent
from autoagent.tools import (
push_changes, submit_pull_request
)
from metachain.registry import register_agent
from autoagent.registry import register_agent
@register_agent(name = "Github Agent", func_name="get_github_agent")
def get_github_agent(model: str):
def instructions(context_variables):
return \
f"""You are an agent that helps user to manage the GitHub repository named 'metachain'.
f"""You are an agent that helps user to manage the GitHub repository named 'autoagent'.
The user will give you the suggestion of the changes to be pushed to the repository.
Follow the following routine with the user:
1. First, use `push_changes` to push the changes to the repository. (If the user want to push all the changes, use `push_changes` with `file_paths=None` as the argument.)

View File

@@ -1,6 +1,6 @@
from metachain.types import Agent
from autoagent.types import Agent
from metachain.registry import register_plugin_agent
from autoagent.registry import register_plugin_agent
@register_plugin_agent(name="Math Solver Agent", func_name="get_math_solver_agent")
def get_math_solver_agent(model: str):

View File

@@ -1,6 +1,6 @@
from metachain.types import Agent
from autoagent.types import Agent
from metachain.registry import register_plugin_agent
from autoagent.registry import register_plugin_agent
@register_plugin_agent(name="Vote Aggregator Agent", func_name="get_vote_aggregator_agent")
def get_vote_aggregator_agent(model: str):

View File

@@ -1,9 +1,9 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from metachain.types import Agent
from metachain.io_utils import read_file
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from autoagent.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from autoagent.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from autoagent.types import Agent
from autoagent.io_utils import read_file
@register_agent(name = "Agent Creator Agent", func_name="get_agent_creator_agent")

View File

@@ -1,8 +1,8 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent
from autoagent.tools.terminal_tools import execute_command
from autoagent.types import Agent
from autoagent.io_utils import read_file
@register_agent(name = "Agent Editor Agent", func_name="get_agent_editor_agent")
def get_agent_editor_agent(model: str) -> str:
@@ -19,7 +19,7 @@ The existing agents are shown below:
If you want to create a new agent, you should:
1. follow the format of the `get_dummy_agent` below:
```python
{read_file('metachain/agents/dummy_agent.py')}
{read_file('autoagent/agents/dummy_agent.py')}
```
2. you successfully create the agent only after you have successfully run the agent with the `run_agent` function to satisfy the user's request.

View File

@@ -1,9 +1,9 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent
from autoagent.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from autoagent.tools.terminal_tools import execute_command
from autoagent.types import Agent
from autoagent.io_utils import read_file
from pydantic import BaseModel, Field
from typing import List
@@ -238,7 +238,7 @@ Follow these examples and guidelines to create appropriate agent forms based on
)
if __name__ == "__main__":
from metachain import MetaChain
from autoagent import MetaChain
agent = get_agent_former_agent("claude-3-5-sonnet-20241022")
client = MetaChain()
task_yaml = """\

View File

@@ -137,26 +137,3 @@ def parse_agent_form(xml_content: str) -> Optional[AgentForm]:
except Exception as e:
print(f"Unexpected error: {e}")
return None
# 使用示例
if __name__ == "__main__":
# 读取和解析XML文件
import json
with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/agent_form/financial_agent_2.xml", 'r', encoding='utf-8') as f:
xml_content = f.read()
form = parse_agent_form(xml_content)
with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/agent_form/financial_agent_2.json", 'w', encoding='utf-8') as f:
json.dump(form.model_dump(), f, indent=4)
if form:
# 访问数据(现在有类型提示和验证)
print(f"System input: {form.system_input}")
print(f"Number of agents: {len(form.agents)}")
# 访问第一个agent的信息
first_agent = form.agents[0]
print(f"First agent name: {first_agent.name}")
print(f"First agent tools: {first_agent.tools}")
# 转换为字典
form_dict = form.model_dump()
print(json.dumps(form_dict, indent=4))

View File

@@ -1,11 +1,11 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from autoagent.tools.meta.tool_retriever import get_api_plugin_tools_doc
from autoagent.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.types import Agent
from metachain.io_utils import read_file
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from autoagent.types import Agent
from autoagent.io_utils import read_file
from autoagent.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
@register_agent(name = "Tool Editor Agent", func_name="get_tool_editor_agent")
def get_tool_editor_agent(model: str) -> Agent:
"""
@@ -19,7 +19,7 @@ You are a tool editor agent responsible for managing plugin tools in the MetaCha
- Plugin tools are the building blocks of MetaChain
- All available plugin tools are as follows:
{list_tools(context_variables)}
- Plugin tools can ONLY be executed using `run_tool(tool_name, run_code)`. You should import `run_tool` by `from metachain.tools import run_tool`.
- Plugin tools can ONLY be executed using `run_tool(tool_name, run_code)`. You should import `run_tool` by `from autoagent.tools import run_tool`.
- NEVER try to import and run plugin tools directly - always use `run_tool`
[TOOL CREATION WORKFLOW]
@@ -38,15 +38,15 @@ You are a tool editor agent responsible for managing plugin tools in the MetaCha
- Only use internal knowledge if no suitable models are found
c. For visual analysis tasks (images/videos):
- MUST use the existing `visual_question_answering` plugin tool by `run_tool("visual_question_answering", "from metachain.tools import visual_question_answering; ...")`. DO NOT use it directly without `run_tool`.
- MUST use the existing `visual_question_answering` plugin tool by `run_tool("visual_question_answering", "from autoagent.tools import visual_question_answering; ...")`. DO NOT use it directly without `run_tool`.
- NO direct implementation of visual processing
- Chain with other tools as needed
3. Plugin Tool Implementation Requirements:
- Use @register_plugin_tool decorator (REQUIRED). You should import `register_plugin_tool` by `from metachain.registry import register_plugin_tool`.
- Use @register_plugin_tool decorator (REQUIRED). You should import `register_plugin_tool` by `from autoagent.registry import register_plugin_tool`.
- Follow this template:
```python
{read_file('metachain/tools/dummy_tool.py')}
{read_file('autoagent/tools/dummy_tool.py')}
```
- Include clear type hints
- Make tools abstract and reusable
@@ -103,7 +103,7 @@ You are a tool editor agent responsible for managing plugin tools in the MetaCha
[TESTING]
Test new tools using `run_tool`:
`run_tool(tool_name="your_tool", run_code="from metachain.tools import your_tool; print(your_tool(param1='value1'))")`
`run_tool(tool_name="your_tool", run_code="from autoagent.tools import your_tool; print(your_tool(param1='value1'))")`
"""
tool_list = [list_tools, create_tool, run_tool, delete_tool, get_api_plugin_tools_doc, execute_command, terminal_page_down, terminal_page_up, terminal_page_to, search_trending_models_on_huggingface, get_hf_model_tools_doc]
return Agent(
@@ -131,16 +131,16 @@ The existing tools are shown below:
If you want to create a new tool, you should:
1. follow the format of the `tool_dummy` below. Note that if the tool should be used with third-part api key, you should write the api key inside the definition of the tool:
```python
{read_file('metachain/tools/dummy_tool.py')}
{read_file('autoagent/tools/dummy_tool.py')}
```
2. you successfully create the tool only after you have successfully run the tool with the `run_tool` function, and an example of testing the tool is shown below.:
```python
from metachain.tools import tool_dummy
from autoagent.tools import tool_dummy
if __name__ == "__main__":
... # some pre-operations
print(run_tool(tool_name="tool_dummy", run_code="from metachain.tools import tool_dummy; print(tool_dummy(args1=args1, args2=args1, ...))"))
print(run_tool(tool_name="tool_dummy", run_code="from autoagent.tools import tool_dummy; print(tool_dummy(args1=args1, args2=args1, ...))"))
```
3. If you encounter any error while creating and running the tool, like dependency missing, you should use the `execute_command` function to install the dependency.
@@ -177,11 +177,11 @@ You are a tool editor agent responsible for managing plugin tools in the MetaCha
2. create_tool(tool_name: str, tool_code: str):
- Creates new plugin tools
- Requires proper registration using @register_plugin_tool, and you MUST import `register_plugin_tool` by `from metachain.registry import register_plugin_tool`
- Requires proper registration using @register_plugin_tool, and you MUST import `register_plugin_tool` by `from autoagent.registry import register_plugin_tool`
3. run_tool(tool_name: str, run_code: str,):
- REQUIRED method to execute any plugin tool
- Format: run_tool("tool_name", "from metachain.tools import tool_name; print(tool_name(args))")
- Format: run_tool("tool_name", "from autoagent.tools import tool_name; print(tool_name(args))")
4. delete_tool(tool_name: str,):
- Removes existing plugin tools
@@ -231,7 +231,7 @@ You are a tool editor agent responsible for managing plugin tools in the MetaCha
a. Design generic, reusable interface
b. Follow the template format:
```python
{read_file('metachain/tools/dummy_tool.py')}
{read_file('autoagent/tools/dummy_tool.py')}
```
c. Create using `create_tool`
d. Test using `run_tool`
@@ -250,7 +250,7 @@ Correct way to test a plugin tool:
```python
result = run_tool(
tool_name="your_tool",
run_code="from metachain.tools import your_tool; print(your_tool(param1='value1'))",
run_code="from autoagent.tools import your_tool; print(your_tool(param1='value1'))",
context_variables=context_variables
)
```

View File

@@ -1,9 +1,9 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from metachain.tools.meta.edit_workflow import list_workflows, create_workflow, run_workflow
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from metachain.types import Agent
from metachain.io_utils import read_file
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from autoagent.tools.meta.edit_workflow import list_workflows, create_workflow, run_workflow
from autoagent.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from autoagent.types import Agent
from autoagent.io_utils import read_file
@register_agent(name = "Workflow Creator Agent", func_name="get_workflow_creator_agent")

View File

@@ -1,10 +1,10 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from metachain.tools.meta.edit_workflow import list_workflows
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent
from autoagent.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from autoagent.tools.meta.edit_workflow import list_workflows
from autoagent.tools.terminal_tools import execute_command
from autoagent.types import Agent
from autoagent.io_utils import read_file
from pydantic import BaseModel, Field
from typing import List
import json
@@ -548,7 +548,7 @@ Follow these examples and guidelines to create appropriate workflow forms based
)
if __name__ == "__main__":
from metachain import MetaChain
from autoagent import MetaChain
agent = get_workflow_former_agent("claude-3-5-sonnet-20241022")
client = MetaChain()
# task_yaml = """\

View File

@@ -322,14 +322,3 @@ def parse_workflow_form(xml_content: str) -> Optional[WorkflowForm]:
except Exception as e:
return f"Unexpected error: {e}"
# 使用示例
if __name__ == "__main__":
# 读取和解析XML文件
import json
with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/workflow_form/condition_mining.xml", 'r', encoding='utf-8') as f:
xml_content = f.read()
form = parse_workflow_form(xml_content)
print(form)
with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/workflow_form/condition_mining.json", 'w', encoding='utf-8') as f:
json.dump(form.model_dump(), f, indent=4)
workflow_form = form.model_dump()

View File

@@ -1,8 +1,8 @@
from metachain.types import Agent
from metachain.registry import register_agent
from metachain.tools import open_local_file, page_up_markdown, page_down_markdown, find_on_page_ctrl_f, find_next, visual_question_answering
from metachain.tools.file_surfer_tool import with_env
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from autoagent.types import Agent
from autoagent.registry import register_agent
from autoagent.tools import open_local_file, page_up_markdown, page_down_markdown, find_on_page_ctrl_f, find_next, visual_question_answering
from autoagent.tools.file_surfer_tool import with_env
from autoagent.environment.markdown_browser import RequestsMarkdownBrowser
import time
from inspect import signature
from constant import LOCAL_ROOT, DOCKER_WORKPLACE_NAME

View File

@@ -1,11 +1,11 @@
from metachain.types import Agent
from metachain.tools import (
from autoagent.types import Agent
from autoagent.tools import (
gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, terminal_page_up, terminal_page_down, terminal_page_to
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent, register_plugin_agent
from autoagent.util import make_message, make_tool_message
from autoagent.registry import register_agent, register_plugin_agent
from constant import LOCAL_ROOT, DOCKER_WORKPLACE_NAME
from metachain.environment import DockerEnv, BrowserEnv, LocalEnv
from autoagent.environment import DockerEnv, BrowserEnv, LocalEnv
from typing import Union
from inspect import signature
def examples(context_variables):

View File

@@ -1,9 +1,9 @@
from .filesurfer_agent import get_filesurfer_agent
from .programming_agent import get_coding_agent
from .websurfer_agent import get_websurfer_agent
from metachain.registry import register_agent
from metachain.types import Agent, Result
from metachain.tools.inner import case_resolved, case_not_resolved
from autoagent.registry import register_agent
from autoagent.types import Agent, Result
from autoagent.tools.inner import case_resolved, case_not_resolved
@register_agent(name = "System Triage Agent", func_name="get_system_triage_agent")
def get_system_triage_agent(model: str, **kwargs):

View File

@@ -1,8 +1,8 @@
from metachain.types import Agent
from metachain.registry import register_agent
from metachain.tools import click, page_down, page_up, history_back, history_forward, web_search, input_text, sleep, visit_url, get_page_markdown
from metachain.tools.web_tools import with_env
from metachain.environment.browser_env import BrowserEnv
from autoagent.types import Agent
from autoagent.registry import register_agent
from autoagent.tools import click, page_down, page_up, history_back, history_forward, web_search, input_text, sleep, visit_url, get_page_markdown
from autoagent.tools.web_tools import with_env
from autoagent.environment.browser_env import BrowserEnv
import time
from constant import DOCKER_WORKPLACE_NAME, LOCAL_ROOT
@register_agent(name = "Web Surfer Agent", func_name="get_websurfer_agent")

View File

@@ -1,9 +1,9 @@
from metachain.types import Agent
from metachain.tools import (
from autoagent.types import Agent
from autoagent.tools import (
get_api_plugin_tools_doc
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent
from autoagent.util import make_message, make_tool_message
from autoagent.registry import register_agent
@register_agent(name = "Tool Retriver Agent", func_name="get_tool_retriver_agent")
def get_tool_retriver_agent(model: str):
def instructions(context_variables):

399
autoagent/cli.py Normal file
View File

@@ -0,0 +1,399 @@
import click
import importlib
from autoagent import MetaChain
from autoagent.util import debug_print
import asyncio
from constant import DOCKER_WORKPLACE_NAME
from autoagent.io_utils import read_yaml_file, get_md5_hash_bytext, read_file
from autoagent.environment.utils import setup_metachain
from autoagent.types import Response
from autoagent import MetaChain
from autoagent.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.progress import Progress, SpinnerColumn, TextColumn
import json
import argparse
from datetime import datetime
from autoagent.agents.meta_agent import tool_editor, agent_editor
from autoagent.tools.meta.edit_tools import list_tools
from autoagent.tools.meta.edit_agents import list_agents
from loop_utils.font_page import MC_LOGO, version_table, NOTES, GOODBYE_LOGO
from rich.live import Live
from autoagent.environment.docker_env import DockerEnv, DockerConfig, check_container_ports
from autoagent.environment.browser_env import BrowserEnv
from autoagent.environment.markdown_browser import RequestsMarkdownBrowser
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
import os
import os.path as osp
from autoagent.agents import get_system_triage_agent
from autoagent.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
import re
from autoagent.cli_utils.metachain_meta_agent import meta_agent
from autoagent.cli_utils.metachain_meta_workflow import meta_workflow
from autoagent.cli_utils.file_select import select_and_copy_files
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
from constant import COMPLETION_MODEL
@click.group()
def cli():
    """The command line interface for autoagent"""
    # Intentionally empty: subcommands register themselves below via @cli.command().
    pass
@cli.command()
@click.option('--model', default='gpt-4o-2024-08-06', help='the name of the model')
@click.option('--agent_func', default='get_dummy_agent', help='the function to get the agent')
@click.option('--query', default='...', help='the user query to the agent')
@click.argument('context_variables', nargs=-1)
def agent(model: str, agent_func: str, query: str, context_variables):
    """
    Run an agent with a given model, agent function, query, and context variables.
    Args:
        model (str): The name of the model.
        agent_func (str): The name of the factory function in `autoagent.agents`.
        query (str): The user query to the agent.
        context_variables (list): KEY=VALUE pairs forwarded to the agent as context.
    Usage:
        mc agent --model=gpt-4o-2024-08-06 --agent_func=get_weather_agent --query="What is the weather in Tokyo?" city=Tokyo unit=C timestamp=2024-01-01
    """
    # Parse KEY=VALUE context arguments; split on the first '=' only so that
    # values may themselves contain '='. Arguments without '=' are ignored.
    context_storage = {}
    for arg in context_variables:
        if '=' in arg:
            key, value = arg.split('=', 1)
            context_storage[key] = value
    # Plain string: the original used an f-string with no placeholder.
    agent_module = importlib.import_module('autoagent.agents')
    try:
        # Bind the resolved callable to a new name instead of clobbering the
        # `agent_func` parameter (keeps the original string for error messages).
        agent_factory = getattr(agent_module, agent_func)
    except AttributeError:
        raise ValueError(f'Agent function {agent_func} not found, you should check in the `autoagent.agents` directory for the correct function name')
    agent = agent_factory(model)
    mc = MetaChain()
    messages = [
        {"role": "user", "content": query}
    ]
    response = mc.run(agent, messages, context_storage, debug=True)
    debug_print(True, response.messages[-1]['content'], title=f'Result of running {agent.name} agent', color='pink3')
    return response.messages[-1]['content']
@cli.command()
@click.option('--workflow_name', default=None, help='the name of the workflow')
@click.option('--system_input', default='...', help='the user query to the agent')
def workflow(workflow_name: str, system_input: str):
    """Synchronous wrapper around the async workflow entry point."""
    # Drive the coroutine to completion on a fresh event loop.
    return asyncio.run(async_workflow(workflow_name, system_input))
async def async_workflow(workflow_name: str, system_input: str):
    """
    Asynchronous implementation of the `workflow` command.

    Looks up *workflow_name* in `autoagent.workflows`, awaits it with
    *system_input*, prints the result and returns it.

    Raises:
        ValueError: if no workflow function with that name exists.
    """
    # Plain string: the original used an f-string with no placeholder.
    workflow_module = importlib.import_module('autoagent.workflows')
    try:
        workflow_func = getattr(workflow_module, workflow_name)
    except AttributeError:
        raise ValueError(f'Workflow function {workflow_name} not found...')
    result = await workflow_func(system_input)  # await the async workflow to completion
    debug_print(True, result, title=f'Result of running {workflow_name} workflow', color='pink3')
    return result
def clear_screen():
    """Print a placeholder message, then reset the terminal below the saved cursor."""
    rich_console = Console()
    rich_console.print("[bold green]Coming soon...[/bold green]")
    # ANSI: restore saved cursor position, erase everything after it, re-show the cursor.
    print('\033[u\033[J\033[?25h', end='')
def get_config(container_name, port):
    """
    Build a DockerConfig for *container_name*.

    If the container already exposes a port, reuse it; otherwise search for a
    free port starting at *port*, guarded by a file lock so concurrent runs do
    not claim the same port.

    Args:
        container_name: Name of the docker container.
        port: Preferred communication port; used as the search start point.
    Returns:
        DockerConfig: Fully populated configuration for the container.
    """
    port_info = check_container_ports(container_name)
    if port_info:
        port = port_info[0]
    else:
        # Use a file lock so port allocation is atomic across processes.
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            # Print the port that failed BEFORE incrementing, so the message
            # names the actual unavailable port (the original printed after).
            while not check_port_available(port):
                print(f'{port} is not available, trying {port+1}')
                port += 1
            # Immediately mark this port as taken for other processes.
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), "workspace_meta_showcase", f"showcase_{container_name}")
    os.makedirs(local_root, exist_ok=True)
    print("port: ", port)
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
    )
    return docker_config
def create_environment(docker_config: DockerConfig):
    """
    Build the three runtime environments for a session:
    1. a Docker code-execution environment (container is started here),
    2. a browser environment,
    3. a markdown file-browsing environment.
    """
    local_root = docker_config.local_root
    workplace = docker_config.workplace_name
    code_env = DockerEnv(docker_config)
    code_env.init_container()
    web_env = BrowserEnv(browsergym_eval_env=None, local_root=local_root, workplace_name=workplace)
    downloads_dir = os.path.join(local_root, workplace, "downloads")
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=local_root, workplace_name=workplace, downloads_folder=downloads_dir)
    return code_env, web_env, file_env
def update_guidance(context_variables):
    """Render the startup banner, version table and notes panel.

    `context_variables` is accepted for signature parity with the caller but
    is not read by this function.
    """
    out = Console()
    banner = Text(MC_LOGO, justify="center")
    out.print(Panel(banner, style="bold salmon1", expand=True))
    out.print(version_table)
    out.print(Panel(NOTES, title="Important Notes", expand=True))
@cli.command(name='main') # changed here: register the command under the explicit name 'main'
@click.option('--container_name', default='quick_start', help='the function to get the agent')
@click.option('--port', default=12345, help='the port to run the container')
@click.option('--test_pull_name', default='autoagent_mirror', help='the name of the test pull')
@click.option('--git_clone', default=True, help='whether to clone a mirror of the repository')
def main(container_name: str, port: int, test_pull_name: str, git_clone: bool):
    """
    Run deep research with a given model, container name, port
    """
    print(f"port: {port}")
    model = COMPLETION_MODEL
    print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True # the progress bar disappears once the task completes
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(container_name, port)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        log_path = osp.join("casestudy_results", 'logs', f'agent_{container_name}_{model}.log')
        # NOTE(review): log_path is computed but not passed below (logger gets
        # log_path=None, i.e. no file output) — confirm whether this is intended.
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up autoagent...[/cyan]\n")
    clear_screen()
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    # select the mode
    while True:
        update_guidance(context_variables)
        mode = single_select_menu(['user mode', 'agent editor', 'workflow editor', 'exit'], "Please select the mode:")
        match mode:
            case 'user mode':
                clear_screen()
                user_mode(model, context_variables, False)
            case 'agent editor':
                clear_screen()
                meta_agent(model, context_variables, False)
            case 'workflow editor':
                clear_screen()
                meta_workflow(model, context_variables, False)
            case 'exit':
                console = Console()
                logo_text = Text(GOODBYE_LOGO, justify="center")
                console.print(Panel(logo_text, style="bold salmon1", expand=True))
                break
def user_mode(model: str, context_variables: dict, debug: bool = True):
    """
    Interactive user-mode REPL.

    Builds the system triage agent and its team, then repeatedly prompts the
    user for a request. An `@AgentName` mention in the query routes the turn
    to that agent; the special `@Upload_files` mention opens the file-upload
    flow instead of running an agent. Loops until the user types 'exit'.

    Args:
        model: Model name passed to the agent factory.
        context_variables: Shared state (working_dir, code/web/file envs).
        debug: Forwarded to MetaChain.run for verbose tracing.
    """
    logger = LoggerManager.get_logger()
    console = Console()
    system_triage_agent = get_system_triage_agent(model)
    assert system_triage_agent.agent_teams != {}, "System Triage Agent must have agent teams"
    messages = []
    agent = system_triage_agent
    # Map @-mention tokens (spaces replaced by underscores) to agent objects.
    agents = {system_triage_agent.name.replace(' ', '_'): system_triage_agent}
    for agent_name in system_triage_agent.agent_teams.keys():
        agents[agent_name.replace(' ', '_')] = system_triage_agent.agent_teams[agent_name]("placeholder").agent
    agents["Upload_files"] = "select"  # sentinel: triggers file upload instead of an agent run
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # Prompt session with @-mention completion over the known agent names.
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    while True:
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            logo_text = "User mode completed. See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        # The last @-mention in the query selects the agent for this turn;
        # with no mention, the previous turn's agent is kept.
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        for word in query.split():
            if word.startswith('@') and word[1:] in agents:
                agent = agents[word[1:]]
        print()
        if hasattr(agent, "name"):
            agent_name = agent.name
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
            messages.append({"role": "user", "content": query})
            response = client.run(agent, messages, context_variables, debug=debug)
            messages.extend(response.messages)
            # Guard: assistant messages that only carry tool calls have content=None.
            model_answer_raw = response.messages[-1]['content'] or ""
            # attempt to parse model_answer
            if model_answer_raw.startswith('Case resolved'):
                solutions = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
                model_answer = solutions[0] if solutions else model_answer_raw
            else:
                model_answer = model_answer_raw
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
            agent = response.agent
        elif agent == "select":
            code_env: DockerEnv = context_variables["code_env"]
            local_workplace = code_env.local_workplace
            files_dir = os.path.join(local_workplace, "files")
            os.makedirs(files_dir, exist_ok=True)
            select_and_copy_files(files_dir, console)
        else:
            console.print(f"[bold red]Unknown agent: {agent}[/bold red]")
@cli.command(name='deep-research') # use a hyphen in the CLI command name
@click.option('--container_name', default='deepresearch', help='the function to get the agent')
@click.option('--port', default=12346, help='the port to run the container')
def deep_research(container_name: str, port: int):
    """
    Run the deep-research interactive session.

    Sets up the Docker config, logger, and code/web/file environments, then
    enters an @-mention REPL that largely mirrors user_mode(); it is kept
    separate because the exit message differs and debug is hardcoded False.

    Args:
        container_name: Name of the Docker container to run in.
        port: Port the container is exposed on.
    """
    print(f"port: {port}")
    model = COMPLETION_MODEL
    print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True # makes the progress bar disappear once it completes
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(container_name, port)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        # NOTE(review): log_path is computed but the logger is constructed with
        # log_path=None, so nothing is written to disk — confirm intent.
        log_path = osp.join("casestudy_results", 'logs', f'agent_{container_name}_{model}.log')
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up autoagent...[/cyan]\n")
        clear_screen()
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    update_guidance(context_variables)
    logger = LoggerManager.get_logger()
    console = Console()
    system_triage_agent = get_system_triage_agent(model)
    assert system_triage_agent.agent_teams != {}, "System Triage Agent must have agent teams"
    messages = []
    agent = system_triage_agent
    # Map @-mention tokens (spaces replaced by underscores) to agent objects.
    agents = {system_triage_agent.name.replace(' ', '_'): system_triage_agent}
    for agent_name in system_triage_agent.agent_teams.keys():
        agents[agent_name.replace(' ', '_')] = system_triage_agent.agent_teams[agent_name]("placeholder").agent
    agents["Upload_files"] = "select"  # sentinel: triggers file upload instead of an agent run
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # Prompt session with @-mention completion over the known agent names.
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    while True:
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            logo_text = "See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        # The last @-mention in the query selects the agent for this turn;
        # with no mention, the previous turn's agent is kept.
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        for word in query.split():
            if word.startswith('@') and word[1:] in agents:
                agent = agents[word[1:]]
        print()
        if hasattr(agent, "name"):
            agent_name = agent.name
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
            messages.append({"role": "user", "content": query})
            response = client.run(agent, messages, context_variables, debug=False)
            messages.extend(response.messages)
            # Guard: assistant messages that only carry tool calls have content=None.
            model_answer_raw = response.messages[-1]['content'] or ""
            # attempt to parse model_answer
            if model_answer_raw.startswith('Case resolved'):
                solutions = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
                model_answer = solutions[0] if solutions else model_answer_raw
            else:
                model_answer = model_answer_raw
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
            agent = response.agent
        elif agent == "select":
            code_env: DockerEnv = context_variables["code_env"]
            local_workplace = code_env.local_workplace
            files_dir = os.path.join(local_workplace, "files")
            os.makedirs(files_dir, exist_ok=True)
            select_and_copy_files(files_dir, console)
        else:
            console.print(f"[bold red]Unknown agent: {agent}[/bold red]")

View File

@@ -1,16 +1,16 @@
from metachain import MetaChain
from metachain.util import UserCompleter
from autoagent import MetaChain
from autoagent.util import UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from metachain.logger import LoggerManager, MetaChainLogger
from autoagent.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.panel import Panel
from metachain.agents.meta_agent.agent_former import get_agent_former_agent
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from metachain.agents.meta_agent.agent_creator import get_agent_creator_agent
from autoagent.agents.meta_agent.agent_former import get_agent_former_agent
from autoagent.agents.meta_agent.tool_editor import get_tool_editor_agent
from autoagent.agents.meta_agent.agent_creator import get_agent_creator_agent
import re
from metachain.agents.meta_agent.form_complie import parse_agent_form
from autoagent.agents.meta_agent.form_complie import parse_agent_form
def extract_agents_content(text):

View File

@@ -1,15 +1,15 @@
from metachain import MetaChain
from metachain.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from autoagent import MetaChain
from autoagent.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from metachain.logger import LoggerManager, MetaChainLogger
from autoagent.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.panel import Panel
from metachain.agents.meta_agent.workflow_former import get_workflow_former_agent
from metachain.agents.meta_agent.workflow_creator import get_workflow_creator_agent
from autoagent.agents.meta_agent.workflow_former import get_workflow_former_agent
from autoagent.agents.meta_agent.workflow_creator import get_workflow_creator_agent
import re
from metachain.agents.meta_agent.worklow_form_complie import parse_workflow_form, WorkflowForm
from autoagent.agents.meta_agent.worklow_form_complie import parse_workflow_form, WorkflowForm
def workflow_profiling(workflow_former, client, messages, context_variables, requirements, debug):
messages.append({"role": "user", "content": requirements + """

View File

@@ -31,7 +31,7 @@ from openai import AsyncOpenAI
import litellm
import inspect
from constant import MC_MODE, FN_CALL, API_BASE_URL, NOT_SUPPORT_SENDER, ADD_USER, NON_FN_CALL
from metachain.fn_call_converter import convert_tools_to_description, convert_non_fncall_messages_to_fncall_messages, SYSTEM_PROMPT_SUFFIX_TEMPLATE, convert_fn_messages_to_non_fn_messages, interleave_user_into_messages
from autoagent.fn_call_converter import convert_tools_to_description, convert_non_fncall_messages_to_fncall_messages, SYSTEM_PROMPT_SUFFIX_TEMPLATE, convert_fn_messages_to_non_fn_messages, interleave_user_into_messages
from litellm.types.utils import Message as litellmMessage
# litellm.set_verbose=True
# client = AsyncOpenAI()

View File

@@ -13,8 +13,8 @@ import numpy as np
import tenacity
from browsergym.utils.obs import flatten_dom_to_str
from PIL import Image
from metachain.util import debug_print
from metachain.logger import LoggerManager
from autoagent.util import debug_print
from autoagent.logger import LoggerManager
import inspect
import textwrap
@@ -26,10 +26,10 @@ from browsergym.core.action.functions import goto, page, get_elem_by_bid, demo_m
import os
from typing import Dict, Union, cast, Literal
from playwright.sync_api import Page, Download
from metachain.io_utils import read_file
from metachain.environment.mdconvert import _get_page_markdown
from metachain.environment.browser_cookies import convert_cookies_to_python
from metachain.environment.cookies_data import COOKIES_LIST
from autoagent.io_utils import read_file
from autoagent.environment.mdconvert import _get_page_markdown
from autoagent.environment.browser_cookies import convert_cookies_to_python
from autoagent.environment.cookies_data import COOKIES_LIST
# from constant import DOCKER_WORKPLACE_NAME, LOCAL_ROOT
from functools import update_wrapper
from inspect import signature
@@ -50,7 +50,7 @@ def _local_to_docker(local_path: str):
docker_path: the docker path to convert, like `{docker_workplace}/downloads/xxx`
Examples:
_local_to_docker('/Users/tangjiabin/Documents/reasoning/metachain/workplace_gaia_eval/downloads/xxx')
_local_to_docker('/Users/tangjiabin/Documents/reasoning/autoagent/workplace_gaia_eval/downloads/xxx')
"""
local_workplace = None
docker_workplace = None
@@ -426,7 +426,7 @@ class BrowserEnv:
docker_path: the docker path to convert, like `{docker_workplace}/downloads/xxx`
Examples:
_local_to_docker('/Users/tangjiabin/Documents/reasoning/metachain/workplace_gaia_eval/downloads/xxx')
_local_to_docker('/Users/tangjiabin/Documents/reasoning/autoagent/workplace_gaia_eval/downloads/xxx')
"""
local_workplace = None
docker_workplace = None
@@ -440,7 +440,7 @@ class BrowserEnv:
action_set = HighLevelActionSet(subsets = ["chat", "infeas", "bid", "nav", "tab", "custom"], custom_actions = [_visit_page, _click_id, _get_page_markdown, _checkMeetChallenge])
# action_set.python_includes = \
# f"""
# {repr(read_file('metachain/environment/markdown_browser/mdconvert.py'))}
# {repr(read_file('autoagent/environment/markdown_browser/mdconvert.py'))}
# """ + action_set.python_includes

View File

@@ -16,7 +16,7 @@ We recommend you to use the Google Chrome browser with the extension "Export coo
3. After you have exported all cookies, use the following command to convert them to python code:
```bash
cd path/to/MetaChain && python metachain/environment/browser_cookies.py
cd path/to/MetaChain && python autoagent/environment/browser_cookies.py
```
## Recommended websites

View File

@@ -6,7 +6,7 @@ import platform
class LocalEnv:
def __init__(self):
self.docker_workplace = os.getcwd()
if self.docker_workplace.endswith("metachain"):
if self.docker_workplace.endswith("autoagent"):
self.docker_workplace = os.path.dirname(self.docker_workplace)
self.local_workplace = self.docker_workplace
self.conda_sh = self._find_conda_sh()

View File

@@ -1,6 +1,6 @@
from browsergym.core.action.functions import goto, page
from metachain.environment.markdown_browser import MarkdownConverter
from autoagent.environment.markdown_browser import MarkdownConverter
def _get_page_markdown():
"""
Get the markdown content of the current page
@@ -1228,6 +1228,6 @@ if __name__ == "__main__":
# 关闭浏览器
browser.close()
# mdconvert = MarkdownConverter()
# res = mdconvert.convert_local("/Users/tangjiabin/Documents/reasoning/metachain/eval_data/GAIA/2023/validation/2b3ef98c-cc05-450b-a719-711aee40ac65.mp3")
# res = mdconvert.convert_local("/Users/tangjiabin/Documents/reasoning/autoagent/eval_data/GAIA/2023/validation/2b3ef98c-cc05-450b-a719-711aee40ac65.mp3")
# print('title:', res.title)
# print('content:', res.text_content)

View File

@@ -1,16 +1,16 @@
from metachain.util import run_command_in_container
from autoagent.util import run_command_in_container
from .docker_env import DockerEnv
from metachain.io_utils import print_stream
from autoagent.io_utils import print_stream
def setup_metachain(workplace_name: str, env: DockerEnv):
cmd = "pip list | grep metachain"
cmd = "pip list | grep autoagent"
response = env.run_command(cmd, print_stream)
if response['status'] == 0:
print("Metachain is already installed.")
print("AutoAgent is already installed.")
return
cmd = f"cd /{workplace_name}/MetaChain && pip install -e ."
response = env.run_command(cmd, print_stream)
if response['status'] == 0:
print("Metachain is installed.")
print("AutoAgent is installed.")
return
else:
raise Exception(f"Failed to install metachain. {response['result']}")
raise Exception(f"Failed to install autoagent. {response['result']}")

View File

@@ -3,7 +3,7 @@ from rich.console import Console
from rich.markup import escape
import json
from typing import List
from constant import DEBUG, DEFAULT_LOG, LOG_PATH
from constant import DEBUG, DEFAULT_LOG, LOG_PATH, MC_MODE
from pathlib import Path
BAR_LENGTH = 60
class MetaChainLogger:
@@ -31,12 +31,15 @@ class MetaChainLogger:
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
message = "\n".join(map(str, args))
color = kwargs.get("color", "white")
if MC_MODE: color = "grey58"
title = kwargs.get("title", "INFO")
log_str = f"[{timestamp}]\n{message}"
if self.debug:
# print_in_box(log_str, color=color, title=title)
self.console.print(self._wrap_title(title, f"bold {color}"))
self.console.print(escape(log_str), highlight=True, emoji=True)
print_str = escape(log_str)
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
log_str = self._wrap_title(title) + "\n" + log_str
if self.log_path: self._write_log(log_str)
def lprint(self, *args: str, **kwargs: dict):
@@ -44,42 +47,61 @@ class MetaChainLogger:
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
message = "\n".join(map(str, args))
color = kwargs.get("color", "white")
if MC_MODE: color = "grey58"
title = kwargs.get("title", "")
log_str = f"[{timestamp}]\n{message}"
# print_in_box(log_str, color=color, title=title)
self.console.print(self._wrap_title(title, f"bold {color}"))
self.console.print(escape(log_str), highlight=True, emoji=True)
print_str = escape(log_str)
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
def _wrap_timestamp(self, timestamp: str, color: bool = True):
color_bos = "[grey58]" if color else ""
color_eos = "[/grey58]" if color else ""
return f"{color_bos}[{timestamp}]{color_eos}"
def _print_tool_execution(self, message, timestamp: str):
self.console.print(self._wrap_title("Tool Execution", "bold pink3"))
if MC_MODE: colors = ["grey58"] * 3
else: colors = ["pink3", "blue", "purple"]
self.console.print(self._wrap_title("Tool Execution", f"bold {colors[0]}"))
self.console.print(self._wrap_timestamp(timestamp, color=True))
self.console.print("[bold blue]Tool Execution:[/bold blue]", end=" ")
self.console.print(f"[bold purple]{message['name']}[/bold purple]\n[bold blue]Result:[/bold blue]")
self.console.print(f"---\n{escape(message['content'])}\n---")
self.console.print(f"[bold {colors[1]}]Tool Execution:[/bold {colors[1]}]", end=" ")
self.console.print(f"[bold {colors[2]}]{message['name']}[/bold {colors[2]}]\n[bold {colors[1]}]Result:[/bold {colors[1]}]")
print_str = f"---\n{escape(message['content'])}\n---"
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
def _save_tool_execution(self, message, timestamp: str):
self._write_log(self._wrap_title("Tool Execution"))
self._write_log(f"{self._wrap_timestamp(timestamp, color=False)}\ntool execution: {message['name']}\nResult:\n---\n{message['content']}\n---")
def _print_assistant_message(self, message, timestamp: str):
self.console.print(self._wrap_title("Assistant Message", "bold light_salmon3"))
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold blue]{message['sender']}[/bold blue]:", end=" ")
if message["content"]: self.console.print(escape(message["content"]), highlight=True, emoji=True)
else: self.console.print(None, highlight=True, emoji=True)
if MC_MODE: colors = ["grey58"] * 3
else: colors = ["light_salmon3", "blue", "purple"]
self.console.print(self._wrap_title("Assistant Message", f"bold {colors[0]}"))
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold {colors[1]}]{message['sender']}[/bold {colors[1]}]:", end=" ")
if message["content"]:
print_str = escape(message["content"])
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
else:
print_str = None
if MC_MODE: print_str = "[grey58]None[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
def _save_assistant_message(self, message, timestamp: str):
self._write_log(self._wrap_title("Assistant Message"))
content = message["content"] if message["content"] else None
self._write_log(f"{self._wrap_timestamp(timestamp, color=False)}\n{message['sender']}: {content}")
def _print_tool_call(self, tool_calls: List, timestamp: str):
if len(tool_calls) >= 1: self.console.print(self._wrap_title("Tool Calls", "bold light_pink1"))
if MC_MODE: colors = ["grey58"] * 3
else: colors = ["light_pink1", "blue", "purple"]
if len(tool_calls) >= 1: self.console.print(self._wrap_title("Tool Calls", f"bold {colors[0]}"))
for tool_call in tool_calls:
f = tool_call["function"]
name, args = f["name"], f["arguments"]
arg_str = self._warp_args(args)
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold purple]{name}[/bold purple]({escape(arg_str)})")
print_arg_str = escape(arg_str)
if MC_MODE: print_arg_str = f"[grey58]{print_arg_str}[/grey58]"
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold {colors[2]}]{name}[/bold {colors[2]}]({print_arg_str})")
def _save_tool_call(self, tool_calls: List, timestamp: str):
if len(tool_calls) >= 1: self._write_log(self._wrap_title("Tool Calls"))

View File

@@ -1,8 +1,8 @@
from metachain import MetaChain, Agent, Response
from autoagent import MetaChain, Agent, Response
from typing import List
from metachain.logger import MetaChainLogger
from metachain.environment.utils import setup_metachain
from metachain.environment.docker_env import DockerConfig, DockerEnv
from autoagent.logger import MetaChainLogger
from autoagent.environment.utils import setup_metachain
from autoagent.environment.docker_env import DockerConfig, DockerEnv
def case_resolved(result: str):
"""

View File

@@ -1,6 +1,6 @@
import os
from typing import List, Dict
from metachain.memory.rag_memory import Memory, Reranker
from autoagent.memory.rag_memory import Memory, Reranker
from litellm import completion
import re

View File

@@ -1,9 +1,9 @@
import os
from typing import List, Dict
from metachain.memory.rag_memory import Memory, Reranker
from autoagent.memory.rag_memory import Memory, Reranker
import openai
import re
from metachain.memory.code_tree.code_parser import CodeParser, to_dataframe_row
from autoagent.memory.code_tree.code_parser import CodeParser, to_dataframe_row
from tree_sitter import Language
from loguru import logger
from openai import OpenAI

View File

@@ -1,11 +1,11 @@
import pandas as pd
from typing import List, Dict
from metachain.memory.rag_memory import Memory, Reranker
from autoagent.memory.rag_memory import Memory, Reranker
import json
import math
import os
from litellm import completion
from metachain.memory.utils import chunking_by_token_size
from autoagent.memory.utils import chunking_by_token_size
class TextMemory(Memory):
def __init__(

View File

@@ -1,6 +1,6 @@
import pandas as pd
from typing import List, Dict
from metachain.memory.rag_memory import Memory, Reranker
from autoagent.memory.rag_memory import Memory, Reranker
import json
import math
import os

View File

@@ -1,6 +1,6 @@
import json
from metachain.core import MetaChain
from autoagent.core import MetaChain
def process_and_print_streaming_response(response):

View File

@@ -2,9 +2,9 @@ from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from contextlib import asynccontextmanager
from typing import Dict, Any, Optional, List
from metachain.registry import registry
from metachain import MetaChain
from metachain.types import Agent, Response
from autoagent.registry import registry
from autoagent import MetaChain
from autoagent.types import Agent, Response
import importlib
import inspect

View File

@@ -11,14 +11,14 @@
# import os
# import importlib
# from metachain.registry import registry
# from autoagent.registry import registry
# # 获取当前目录下的所有 .py 文件
# current_dir = os.path.dirname(__file__)
# for file in os.listdir(current_dir):
# if file.endswith('.py') and not file.startswith('__'):
# module_name = file[:-3]
# importlib.import_module(f'metachain.tools.{module_name}')
# importlib.import_module(f'autoagent.tools.{module_name}')
# # 导出所有注册的工具
# globals().update(registry.tools)
@@ -27,7 +27,7 @@
import os
import importlib
from metachain.registry import registry
from autoagent.registry import registry
def import_tools_recursively(base_dir: str, base_package: str):
"""Recursively import all tools in .py files
@@ -58,7 +58,7 @@ def import_tools_recursively(base_dir: str, base_package: str):
# get the current directory and import all tools
current_dir = os.path.dirname(__file__)
import_tools_recursively(current_dir, 'metachain.tools')
import_tools_recursively(current_dir, 'autoagent.tools')
# export all tool creation functions
globals().update(registry.tools)

View File

@@ -1,7 +1,7 @@
import requests
from typing import Optional, List, Dict
from metachain.tools.github_client import GitHubSearcher
from metachain.registry import register_tool
from autoagent.tools.github_client import GitHubSearcher
from autoagent.registry import register_tool
from constant import GITHUB_AI_TOKEN
import json
@register_tool("search_github_repos")

View File

@@ -1,11 +1,11 @@
from metachain.types import Result, Agent
from autoagent.types import Result, Agent
from typing import Union
from metachain.registry import register_plugin_tool
from autoagent.registry import register_plugin_tool
@register_plugin_tool("tool_dummy") # the name of registry should be the same as the name of the tool
def tool_dummy(args1, args2, context_variables)-> Union[str, Agent, Result]:
"""
[This is a dummy tool, it's used for demonstrating the usage of the metachain, you should write your own tool instead of using this dummy tool, and the following docstring is just a template, you should modify it to fit your own tool.]
[This is a dummy tool, it's used for demonstrating the usage of the autoagent, you should write your own tool instead of using this dummy tool, and the following docstring is just a template, you should modify it to fit your own tool.]
The tool is used to ...
Args:
@@ -34,7 +34,7 @@ def tool_dummy(args1, args2, context_variables)-> Union[str, Agent, Result]:
4. The value of Result class should be a string, if you want to return a json dictionary, you should convert it to string by json.dumps(result)
```
5. The tools should be created in the python file in the `metachain/metachain/tools` folder.
5. The tools should be created in the python file in the `autoagent/autoagent/tools` folder.
"""
... # the implementation of the tool
return "..." # the return value of the tool.

View File

@@ -1,13 +1,13 @@
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from metachain.environment import LocalEnv
from autoagent.environment.markdown_browser import RequestsMarkdownBrowser
from autoagent.environment import LocalEnv
from functools import partial, update_wrapper
from inspect import signature
from typing import Tuple
import time
from metachain.registry import register_tool, register_plugin_tool
from autoagent.registry import register_tool, register_plugin_tool
from typing import Union, Optional
from typing import Optional
from metachain.types import Result
from autoagent.types import Result
import requests
import mimetypes
import base64
@@ -342,5 +342,5 @@ if __name__ == "__main__":
# print(find_on_page_ctrl_f(env, "Chain-of-Thought"))
# print("Find next", "~"*100)
# print(find_next(env))
print(visual_question_answering(context_variables = {"file_env": env}, file_path = "//workplace_gaia_meta/metachain/downloaded_video.mp4", question="What is the highest number of bird species to be on camera simultaneously?").value)
print(visual_question_answering(context_variables = {"file_env": env}, file_path = "//workplace_gaia_meta/autoagent/downloaded_video.mp4", question="What is the highest number of bird species to be on camera simultaneously?").value)
# print(visual_question_answering("/workplace_meta/downloads/workflow.png", "What is the main idea of this paper?").image)

View File

@@ -1,17 +1,17 @@
# from metachain.util import run_command_in_container
from metachain.environment import DockerEnv, LocalEnv
# from autoagent.util import run_command_in_container
from autoagent.environment import DockerEnv, LocalEnv
from constant import GITHUB_AI_TOKEN
from metachain.tools.github_client import GitHubClient
from autoagent.tools.github_client import GitHubClient
import json
from metachain.registry import register_tool
from autoagent.registry import register_tool
from typing import Union
@register_tool("get_current_branch")
def get_current_branch(context_variables):
f"""
Get the current branch of the 'metachain'.
Get the current branch of the 'autoagent'.
"""
env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
branch_command = f"cd {env.docker_workplace}/metachain && git branch --show-current"
branch_command = f"cd {env.docker_workplace}/autoagent && git branch --show-current"
result = env.run_command(branch_command)
if result['status'] == 0:
return result['result'].strip()
@@ -21,10 +21,10 @@ def get_current_branch(context_variables):
@register_tool("get_diff")
def get_diff(context_variables):
f"""
Get the diff of the 'metachain'.
Get the diff of the 'autoagent'.
"""
env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
diff_command = f"cd {env.docker_workplace}/metachain && git add -N . && git diff"
diff_command = f"cd {env.docker_workplace}/autoagent && git add -N . && git diff"
result = env.run_command(diff_command)
if result['status'] == 0:
return result['result'].strip()
@@ -45,11 +45,11 @@ def stage_files(context_variables, file_paths=None):
env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
if file_paths is None:
# add all changes to the staging area
command = f"cd {env.docker_workplace}/metachain && git add ."
command = f"cd {env.docker_workplace}/autoagent && git add ."
else:
# add specified files to the staging area
files = ' '.join(file_paths)
command = f"cd {env.docker_workplace}/metachain && git add {files}"
command = f"cd {env.docker_workplace}/autoagent && git add {files}"
result = env.run_command(command)
return result
@@ -74,7 +74,7 @@ def push_changes(context_variables, commit_message, file_paths=None):
return json.dumps({'status': 'error', 'message': f"Failed to stage files: {stage_result['result']}"}, indent=4)
commands = [
f"cd {env.docker_workplace}/metachain",
f"cd {env.docker_workplace}/autoagent",
f'git commit -m "{commit_message}"',
"git push origin $(git branch --show-current)"
]
@@ -107,7 +107,7 @@ def submit_pull_request(title: str, body: str, target_branch: str):
# create a pull request
pr_result = github.create_pull_request(
repo="tjb-tech/metachain",
repo="tjb-tech/autoagent",
title=title,
body=body,
head=get_current_branch(),
@@ -132,7 +132,7 @@ def submit_pull_request(title: str, body: str, target_branch: str):
# """
# # use gh to create a PR. make sure the gh cli is installed in the container and the github token is set
# pr_command = f"""cd /{DOCKER_WORKPLACE_NAME}/metachain && \
# pr_command = f"""cd /{DOCKER_WORKPLACE_NAME}/autoagent && \
# gh pr create \
# --title "{title}" \
# --body "{body}" \

View File

@@ -1,4 +1,4 @@
from metachain.registry import register_tool
from autoagent.registry import register_tool
@register_tool("case_resolved")
def case_resolved(result: str):

View File

@@ -1,7 +1,7 @@
from metachain.registry import registry
from metachain.environment import LocalEnv, DockerEnv, DockerConfig
from autoagent.registry import registry
from autoagent.environment import LocalEnv, DockerEnv, DockerConfig
from typing import Union
from metachain.tools.terminal_tools import (
from autoagent.tools.terminal_tools import (
create_file,
create_directory,
execute_command,
@@ -9,9 +9,9 @@ from metachain.tools.terminal_tools import (
print_stream,
process_terminal_response
)
from metachain.registry import register_tool
from autoagent.registry import register_tool
import json
from metachain.tools.meta.edit_tools import get_metachain_path
from autoagent.tools.meta.edit_tools import get_metachain_path
from string import Formatter
from pydantic import BaseModel
import subprocess
@@ -30,7 +30,7 @@ def list_agents(context_variables):
path = get_metachain_path(env)
except Exception as e:
return "[ERROR] Failed to list agents. Error: " + str(e)
python_code = '"from metachain.registry import registry; import json; print(\\"AGENT_LIST_START\\"); print(json.dumps(registry.display_plugin_agents_info, indent=4)); print(\\"AGENT_LIST_END\\")"'
python_code = '"from autoagent.registry import registry; import json; print(\\"AGENT_LIST_START\\"); print(json.dumps(registry.display_plugin_agents_info, indent=4)); print(\\"AGENT_LIST_END\\")"'
list_agents_cmd = f"cd {path} && DEFAULT_LOG=False python -c {python_code}"
result = env.run_command(list_agents_cmd)
if result['status'] != 0:
@@ -175,10 +175,10 @@ def create_agent(agent_name: str, agent_description: str, agent_tools: list[str]
except Exception as e:
return "[ERROR] Failed to list agents. Error: " + str(e)
agents_dir = path + "/metachain/agents"
agents_dir = path + "/autoagent/agents"
for tool in agent_tools:
tools_str += f"from metachain.tools import {tool}\n"
tools_str += f"from autoagent.tools import {tool}\n"
agent_func = f"get_{agent_name.lower().replace(' ', '_')}"
if has_format_keys(agent_instructions):
format_keys = extract_format_keys(agent_instructions)
@@ -195,9 +195,9 @@ def instructions(context_variables):
tool_list = "[{}]".format(', '.join(f'{tool}' for tool in agent_tools))
create_codes = f"""\
from metachain.types import Agent
from autoagent.types import Agent
{tools_str}
from metachain.registry import register_plugin_agent
from autoagent.registry import register_plugin_agent
@register_plugin_agent(name="{agent_name}", func_name="{agent_func}")
def {agent_func}(model: str):
@@ -214,13 +214,13 @@ def {agent_func}(model: str):
"""
# print(create_codes)
# with open(f"metachain/agents/{agent_name.lower().replace(' ', '_')}.py", "w", encoding="utf-8") as f:
# with open(f"autoagent/agents/{agent_name.lower().replace(' ', '_')}.py", "w", encoding="utf-8") as f:
# f.write(create_codes)
try:
msg = create_file(agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py", create_codes, context_variables)
if msg.startswith("Error creating file:"):
return "[ERROR] Failed to create agent. Error: " + msg
result = code_env.run_command('cd {} && python metachain/agents/{}.py'.format(path, agent_name.lower().replace(' ', '_')))
result = code_env.run_command('cd {} && python autoagent/agents/{}.py'.format(path, agent_name.lower().replace(' ', '_')))
if result['status'] != 0:
return "[ERROR] Failed to create agent. Error: " + result['result']
return "Successfully created agent: " + agent_name + " in " + agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py"
@@ -252,7 +252,7 @@ def create_orchestrator_agent(agent_name: str, agent_description: str, sub_agent
except Exception as e:
return "[ERROR] Failed to list agents. Error: " + str(e)
agents_dir = path + "/metachain/agents"
agents_dir = path + "/autoagent/agents"
agent_list = list_agents(context_variables)
if agent_list.startswith("[ERROR]"):
return "Failed to list agents. Error: " + agent_list
@@ -261,7 +261,7 @@ def create_orchestrator_agent(agent_name: str, agent_description: str, sub_agent
import_agent_str = ""
for ainfo in sub_agent_info:
import_agent_str += f"""
from metachain.agents import {ainfo['func_name']}
from autoagent.agents import {ainfo['func_name']}
"""
if has_format_keys(agent_instructions):
format_keys = extract_format_keys(agent_instructions)
@@ -320,9 +320,9 @@ def instructions(context_variables):
create_codes = f"""\
from metachain.types import Agent
from metachain.registry import register_plugin_agent
from metachain.types import Result
from autoagent.types import Agent
from autoagent.registry import register_plugin_agent
from autoagent.types import Result
@register_plugin_agent(name = "{agent_name}", func_name="{agent_func}")
def {agent_func}(model: str):
@@ -341,13 +341,13 @@ def {agent_func}(model: str):
return {agent_name.lower().replace(' ', '_')}
"""
# print(create_codes)
# with open(f"metachain/agents/{agent_name.lower().replace(' ', '_')}.py", "w", encoding="utf-8") as f:
# with open(f"autoagent/agents/{agent_name.lower().replace(' ', '_')}.py", "w", encoding="utf-8") as f:
# f.write(create_codes)
try:
msg = create_file(agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py", create_codes, context_variables)
if msg.startswith("Error creating file:"):
return "[ERROR] Failed to create agent. Error: " + msg
result = code_env.run_command('cd {} && python metachain/agents/{}.py'.format(path, agent_name.lower().replace(' ', '_')))
result = code_env.run_command('cd {} && python autoagent/agents/{}.py'.format(path, agent_name.lower().replace(' ', '_')))
if result['status'] != 0:
return "[ERROR] Failed to create agent. Error: " + result['result']
return "Successfully created agent: " + agent_name + " in " + agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py"
@@ -380,7 +380,7 @@ def read_agent(agent_name: str, context_variables: dict):
if __name__ == "__main__":
# # print(list_agents({}))
# from litellm import completion
# from metachain.util import function_to_json
# from autoagent.util import function_to_json
# tools = [function_to_json(create_agent)]
# messages = [
# {"role": "system", "content": "You are a helpful assistant."},
@@ -427,7 +427,7 @@ if __name__ == "__main__":
workplace_name = "workplace",
communication_port = 12350,
conda_path = "/root/miniconda3",
local_root = "/Users/tangjiabin/Documents/reasoning/metachain/workspace_meta_showcase/showcase_nl2agent_showcase"
local_root = "/Users/tangjiabin/Documents/reasoning/autoagent/workspace_meta_showcase/showcase_nl2agent_showcase"
)
code_env = DockerEnv(docker_cfg)
context_variables = {"code_env": code_env}

View File

@@ -1,7 +1,7 @@
from metachain.registry import registry
from metachain.environment import LocalEnv, DockerEnv
from autoagent.registry import registry
from autoagent.environment import LocalEnv, DockerEnv
from typing import Union
from metachain.tools.terminal_tools import (
from autoagent.tools.terminal_tools import (
create_file,
create_directory,
run_python,
@@ -11,10 +11,10 @@ from metachain.tools.terminal_tools import (
terminal_page_to,
process_terminal_response
)
from metachain.registry import register_tool
from autoagent.registry import register_tool
import json
def get_metachain_path(env: Union[LocalEnv, DockerEnv]) -> str:
result = env.run_command('pip show metachain')
result = env.run_command('pip show autoagent')
if result['status'] != 0:
raise Exception("Failed to list tools. Error: " + result['result'])
stdout = result['result']
@@ -26,7 +26,7 @@ def get_metachain_path(env: Union[LocalEnv, DockerEnv]) -> str:
def protect_tools(tool_name: str):
if tool_name in registry.tools_info.keys():
raise Exception(f"The tool `{tool_name}` can NOT be modified. You can DIRECTLY use the `{tool_name}` tool by USING the `run_tool` tool. Or you can create a new tool using this tool by `from metachain.tools import {tool_name}`.")
raise Exception(f"The tool `{tool_name}` can NOT be modified. You can DIRECTLY use the `{tool_name}` tool by USING the `run_tool` tool. Or you can create a new tool using this tool by `from autoagent.tools import {tool_name}`.")
@register_tool("list_tools")
@@ -41,7 +41,7 @@ def list_tools(context_variables):
path = get_metachain_path(env)
except Exception as e:
return "Failed to list tools. Error: " + str(e)
python_code = '"from metachain.registry import registry; import json; print(\\"TOOL_LIST_START\\"); print(json.dumps(registry.display_plugin_tools_info, indent=4)); print(\\"TOOL_LIST_END\\")"'
python_code = '"from autoagent.registry import registry; import json; print(\\"TOOL_LIST_START\\"); print(json.dumps(registry.display_plugin_tools_info, indent=4)); print(\\"TOOL_LIST_END\\")"'
list_tools_cmd = f"cd {path} && DEFAULT_LOG=False python -c {python_code}"
result = env.run_command(list_tools_cmd)
if result['status'] != 0:
@@ -88,11 +88,11 @@ def create_tool(tool_name: str, tool_code: str, context_variables):
except Exception as e:
return "[ERROR] Failed to list tools. Error: " + str(e)
tools_dir = path + "/metachain/tools"
tools_dir = path + "/autoagent/tools"
try:
tool_path = tools_dir + "/" + tool_name + ".py"
if "from metachain.registry import register_plugin_tool" not in tool_code:
tool_code = "from metachain.registry import register_plugin_tool\n" + tool_code
if "from autoagent.registry import register_plugin_tool" not in tool_code:
tool_code = "from autoagent.registry import register_plugin_tool\n" + tool_code
msg = create_file(tool_path, tool_code, context_variables)
if msg.startswith("Error creating file:"):
@@ -215,7 +215,7 @@ if __name__ == "__main__":
# print(list_tools({}))
# print(create_tool("visual_question_answering", "print('Hello, World!')", {}))
test_code = """
from metachain.tools import test_file_tools
from autoagent.tools import test_file_tools
print(test_file_tools())
"""
print(run_tool("test_file_tools", test_code, {}))

View File

@@ -1,12 +1,12 @@
from typing import Union
from metachain.environment import LocalEnv, DockerEnv
from metachain.tools.meta.edit_tools import get_metachain_path
from metachain.tools.meta.edit_agents import list_agents
from metachain.tools.terminal_tools import create_file, create_directory, print_stream, process_terminal_response
from metachain.registry import register_tool
from autoagent.environment import LocalEnv, DockerEnv
from autoagent.tools.meta.edit_tools import get_metachain_path
from autoagent.tools.meta.edit_agents import list_agents
from autoagent.tools.terminal_tools import create_file, create_directory, print_stream, process_terminal_response
from autoagent.registry import register_tool
import json
from metachain import MetaChain
from metachain.types import Response
from autoagent import MetaChain
from autoagent.types import Response
import shlex
from datetime import datetime
from pydantic import BaseModel
@@ -16,12 +16,12 @@ import json
import argparse
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageToolCall
from metachain.flow import default_drive, EventInput, ReturnBehavior
from metachain.flow.dynamic import goto_events, abort_this
from autoagent.flow import default_drive, EventInput, ReturnBehavior
from autoagent.flow.dynamic import goto_events, abort_this
import re
from metachain import MetaChain
from metachain.types import Response
from metachain.registry import register_workflow
from autoagent import MetaChain
from autoagent.types import Response
from autoagent.registry import register_workflow
def extract_answer(response: str, key: str):
pattern = f"<{key}>(.*?)</{key}>"
@@ -167,7 +167,7 @@ def create_workflow(workflow_name: str, context_variables: dict) -> str:
except Exception as e:
return "[ERROR] Failed to list agents. Error: " + str(e)
workflows_dir = path + "/metachain/workflows"
workflows_dir = path + "/autoagent/workflows"
agent_list = list_agents(context_variables)
if agent_list.startswith("[ERROR]"):
return "Failed to list agents. Error: " + agent_list
@@ -180,7 +180,7 @@ def create_workflow(workflow_name: str, context_variables: dict) -> str:
import_agent_str = ""
for ainfo in agent_info_dict.values():
import_agent_str += f"""
from metachain.agents import {ainfo['func_name']}
from autoagent.agents import {ainfo['func_name']}
"""
events = workflow_form["events"]
events_code = CODE_PREFIX + import_agent_str
@@ -197,7 +197,7 @@ from metachain.agents import {ainfo['func_name']}
msg = create_file(workflows_dir + "/" + workflow_name.lower().replace(' ', '_') + "_flow.py", events_code, context_variables)
if msg.startswith("Error creating file:"):
return "[ERROR] Failed to create workflow. Error: " + msg
result = code_env.run_command('cd {} && python metachain/workflows/{}_flow.py'.format(path, workflow_name.lower().replace(' ', '_')))
result = code_env.run_command('cd {} && python autoagent/workflows/{}_flow.py'.format(path, workflow_name.lower().replace(' ', '_')))
if result['status'] != 0:
return "[ERROR] Failed to create workflow. Error: " + result['result']
return "Successfully created workflow: " + workflow_name + " in " + workflows_dir + "/" + workflow_name.lower().replace(' ', '_') + "_flow.py"
@@ -216,7 +216,7 @@ def list_workflows(context_variables):
path = get_metachain_path(env)
except Exception as e:
return "[ERROR] Failed to list workflows. Error: " + str(e)
python_code = '"from metachain.registry import registry; import json; print(\\"WORKFLOW_LIST_START\\"); print(json.dumps(registry.display_workflows_info, indent=4)); print(\\"WORKFLOW_LIST_END\\")"'
python_code = '"from autoagent.registry import registry; import json; print(\\"WORKFLOW_LIST_START\\"); print(json.dumps(registry.display_workflows_info, indent=4)); print(\\"WORKFLOW_LIST_END\\")"'
list_workflows_cmd = f"cd {path} && DEFAULT_LOG=False python -c {python_code}"
result = env.run_command(list_workflows_cmd)
if result['status'] != 0:
@@ -279,16 +279,16 @@ DEFAULT_LOG=False mc workflow --workflow_name={workflow_name} --system_input={sy
return "[ERROR] Failed to run the workflow. Error: " + str(e)
if __name__ == "__main__":
from metachain.environment import DockerConfig, DockerEnv, LocalEnv
from autoagent.environment import DockerConfig, DockerEnv, LocalEnv
docker_cfg = DockerConfig(
container_name = "nl2agent_showcase",
workplace_name = "workplace",
communication_port = 12350,
conda_path = "/root/miniconda3",
local_root = "/Users/tangjiabin/Documents/reasoning/metachain/workspace_meta_showcase/showcase_nl2agent_showcase"
local_root = "/Users/tangjiabin/Documents/reasoning/autoagent/workspace_meta_showcase/showcase_nl2agent_showcase"
)
code_env = DockerEnv(docker_cfg)
with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/workflow_form/condition_mining.json", 'r', encoding='utf-8') as f:
with open("/Users/tangjiabin/Documents/reasoning/autoagent/autoagent/agents/meta_agent/workflow_form/condition_mining.json", 'r', encoding='utf-8') as f:
workflow_form = json.load(f)
print(workflow_form)

View File

@@ -1,4 +1,4 @@
from metachain.registry import register_tool
from autoagent.registry import register_tool
from huggingface_hub import HfApi, hf_hub_download
from typing import List
import tempfile

View File

@@ -1,8 +1,8 @@
from metachain.memory.tool_memory import ToolMemory, ToolReranker
from autoagent.memory.tool_memory import ToolMemory, ToolReranker
import os
from metachain.io_utils import get_file_md5
from autoagent.io_utils import get_file_md5
import pandas as pd
from metachain.registry import register_tool
from autoagent.registry import register_tool
from constant import COMPLETION_MODEL, EMBEDDING_MODEL
@register_tool("get_api_plugin_tools_doc")

View File

@@ -1,8 +1,8 @@
from metachain.memory.code_memory import CodeMemory, CodeReranker
from autoagent.memory.code_memory import CodeMemory, CodeReranker
import os
from metachain.environment import DockerEnv, LocalEnv
from metachain.io_utils import compress_folder, get_file_md5
from metachain.registry import register_tool
from autoagent.environment import DockerEnv, LocalEnv
from autoagent.io_utils import compress_folder, get_file_md5
from autoagent.registry import register_tool
from typing import Union
@register_tool("code_rag")
def code_rag(query_text: str, context_variables) -> str:
@@ -16,13 +16,13 @@ def code_rag(query_text: str, context_variables) -> str:
env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
code_memory = CodeMemory(project_path = './code_db', platform='OpenAI', api_key=os.getenv("OPENAI_API_KEY"),embedding_model='text-embedding-3-small')
code_reranker = CodeReranker(model="gpt-4o-2024-08-06")
code_path = f"{env.local_workplace}/metachain"
compress_folder(code_path, f"{env.local_workplace}/", "metachain.zip")
code_id = get_file_md5(f"{env.local_workplace}/metachain.zip")
code_path = f"{env.local_workplace}/autoagent"
compress_folder(code_path, f"{env.local_workplace}/", "autoagent.zip")
code_id = get_file_md5(f"{env.local_workplace}/autoagent.zip")
code_memory.collection_name = code_memory.collection_name + f"_{code_id}"
if code_memory.count() == 0:
code_memory.add_code_files(f"{env.local_workplace}/metachain", exclude_prefix=['__pycache__', 'code_db', '.git'])
code_memory.add_code_files(f"{env.local_workplace}/autoagent", exclude_prefix=['__pycache__', 'code_db', '.git'])
query_results = code_memory.query_code(query_text, n_results=20)
reranked_results = code_reranker.rerank(query_text, query_results)

View File

@@ -1,18 +1,18 @@
from metachain.memory.rag_memory import Memory
from autoagent.memory.rag_memory import Memory
import os
from metachain.environment.docker_env import DockerEnv
from metachain.environment.local_env import LocalEnv
from autoagent.environment.docker_env import DockerEnv
from autoagent.environment.local_env import LocalEnv
from typing import Union
from metachain.environment.local_env import LocalEnv
from metachain.io_utils import compress_folder, get_file_md5
from metachain.registry import register_tool, register_plugin_tool
from autoagent.environment.local_env import LocalEnv
from autoagent.io_utils import compress_folder, get_file_md5
from autoagent.registry import register_tool, register_plugin_tool
from litellm import completion
import zipfile
import shutil
from metachain.environment.markdown_browser.mdconvert import MarkdownConverter
from metachain.memory.utils import chunking_by_token_size
from autoagent.environment.markdown_browser.mdconvert import MarkdownConverter
from autoagent.memory.utils import chunking_by_token_size
import math
from metachain.types import Result
from autoagent.types import Result
# @register_tool("load_db")
# def load_db(db_path: str) -> str:

View File

@@ -4,10 +4,10 @@ import socket
import json
import base64
import math
# from metachain.util import run_command_in_container
from metachain.environment.docker_env import DockerEnv, DockerConfig
from metachain.registry import register_tool
from metachain.environment.markdown_browser.requests_markdown_browser import RequestsMarkdownBrowser
# from autoagent.util import run_command_in_container
from autoagent.environment.docker_env import DockerEnv, DockerConfig
from autoagent.registry import register_tool
from autoagent.environment.markdown_browser.requests_markdown_browser import RequestsMarkdownBrowser
from typing import Tuple, Optional, Dict, Union
import time
import tiktoken
@@ -15,8 +15,8 @@ from datetime import datetime
from functools import wraps
from rich.console import Console
from pathlib import Path
from metachain.environment.local_env import LocalEnv
from metachain.environment.docker_env import DockerEnv
from autoagent.environment.local_env import LocalEnv
from autoagent.environment.docker_env import DockerEnv
import inspect
terminal_env = RequestsMarkdownBrowser(local_root=os.getcwd(), workplace_name="terminal_env", viewport_size=1024 * 8)

View File

@@ -1,10 +1,10 @@
from metachain.environment.docker_env import DockerEnv
from autoagent.environment.docker_env import DockerEnv
import tiktoken
from datetime import datetime
def truncate_by_tokens(env: DockerEnv, text, max_tokens = 4096, model="gpt-4o-2024-08-06"):
from metachain.tools.terminal_tools import create_file
from autoagent.tools.terminal_tools import create_file
encoding = tiktoken.encoding_for_model(model)
tokens = encoding.encode(text)

View File

@@ -1,18 +1,18 @@
from metachain.registry import register_tool
from autoagent.registry import register_tool
from browsergym.core.action.highlevel import HighLevelActionSet
from typing import Literal
from metachain.environment import DockerEnv, DockerConfig, BrowserEnv, VIEWPORT
from autoagent.environment import DockerEnv, DockerConfig, BrowserEnv, VIEWPORT
from browsergym.utils.obs import flatten_axtree_to_str
from dataclasses import dataclass, field
from typing import Dict
from urllib.parse import quote_plus
from metachain.types import Result
from autoagent.types import Result
from functools import partial, update_wrapper
from inspect import signature
import tiktoken
from datetime import datetime
from metachain.util import function_to_json
from metachain.tools.md_obs import flatten_md_axtree_to_str
from autoagent.util import function_to_json
from autoagent.tools.md_obs import flatten_md_axtree_to_str
# def with_env(env: BrowserEnv):
# """将env注入到工具函数中的装饰器"""
# def decorator(func):
@@ -379,7 +379,7 @@ def sleep(context_variables):
image=web_obs.screenshot,
)
def truncate_by_tokens(env: DockerEnv, text, max_tokens = 4096, model="gpt-4o-2024-08-06"):
from metachain.tools.terminal_tools import create_file, create_directory
from autoagent.tools.terminal_tools import create_file, create_directory
encoding = tiktoken.encoding_for_model(model)
tokens = encoding.encode(text)
@@ -429,7 +429,7 @@ def get_page_markdown(context_variables):
)
if __name__ == "__main__":
env = BrowserEnv(browsergym_eval_env = None, local_root="/Users/tangjiabin/Documents/reasoning/metachain", workplace_name="workplace_gaia_eval")
env = BrowserEnv(browsergym_eval_env = None, local_root="/Users/tangjiabin/Documents/reasoning/autoagent", workplace_name="workplace_gaia_eval")
# code_env = DockerEnv(DockerConfig(container_name = "gaia_lite_eval",
# workplace_name = "workplace_gaia_eval",
# communication_port = 12345,

View File

@@ -331,17 +331,31 @@ def function_to_json(func) -> dict:
# )
# parameters[param.name] = {"type": param_type}
for param in signature.parameters.values():
if param.name == "context_variables":
continue
try:
param_info = get_type_info(param.annotation, type_map)
if isinstance(param_info, dict) and "additionalProperties" in param_info:
del param_info["additionalProperties"]
parameters[param.name] = get_type_info(param.annotation, type_map)
except KeyError as e:
raise KeyError(f"Unknown type annotation {param.annotation} for parameter {param.name}: {str(e)}")
required = [
param.name
for param in signature.parameters.values()
if param.default == inspect._empty
]
if not parameters:
parameters["dummy"] = {
"type": "string",
"description": "Dummy parameter (not used). Added to satisfy non-empty schema requirements."
}
required = []
return {
"type": "function",
"function": {

View File

@@ -1,6 +1,6 @@
import os
import importlib
from metachain.registry import registry
from autoagent.registry import registry
def import_workflows_recursively(base_dir: str, base_package: str):
"""Recursively import all workflows in .py files
@@ -31,7 +31,7 @@ def import_workflows_recursively(base_dir: str, base_package: str):
# get the current directory and import all tools
current_dir = os.path.dirname(__file__)
import_workflows_recursively(current_dir, 'metachain.workflows')
import_workflows_recursively(current_dir, 'autoagent.workflows')
# export all tool creation functions
globals().update(registry.workflows)

View File

@@ -3,21 +3,21 @@ import json
import argparse
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageToolCall
from metachain.flow import default_drive, EventInput, ReturnBehavior
from metachain.flow.dynamic import goto_events, abort_this
from autoagent.flow import default_drive, EventInput, ReturnBehavior
from autoagent.flow.dynamic import goto_events, abort_this
import re
from metachain import MetaChain
from metachain.types import Response
from metachain.registry import register_workflow
from autoagent import MetaChain
from autoagent.types import Response
from autoagent.registry import register_workflow
def extract_answer(response: str, key: str):
pattern = f"<{key}>(.*?)</{key}>"
matches = re.findall(pattern, response)
return matches[0] if len(matches) > 0 else None
from metachain.agents import get_math_solver_agent
from autoagent.agents import get_math_solver_agent
from metachain.agents import get_vote_aggregator_agent
from autoagent.agents import get_vote_aggregator_agent
@default_drive.make_event
async def on_start(event: EventInput, global_ctx):
print("start the workflow:" + 'math_solver_workflow')

View File

@@ -1,5 +1,6 @@
import os
from dotenv import load_dotenv
import platform
# utils:
load_dotenv() # 加载.env文件
def str_to_bool(value):
@@ -10,8 +11,8 @@ def str_to_bool(value):
if isinstance(value, bool):
return value
if not value:
return False
if value == None:
return None
value = str(value).lower().strip()
if value in true_values:
@@ -21,33 +22,70 @@ def str_to_bool(value):
return True # default return True
DOCKER_WORKPLACE_NAME = os.getenv('DOCKER_WORKPLACE_NAME', 'workplace_meta')
DOCKER_WORKPLACE_NAME = os.getenv('DOCKER_WORKPLACE_NAME', 'workplace')
GITHUB_AI_TOKEN = os.getenv('GITHUB_AI_TOKEN', None)
AI_USER = os.getenv('AI_USER', None)
LOCAL_ROOT = os.getenv('LOCAL_ROOT', os.getcwd())
DEBUG = str_to_bool(os.getenv('DEBUG', True))
DEBUG = str_to_bool(os.getenv('DEBUG', False))
DEFAULT_LOG = str_to_bool(os.getenv('DEFAULT_LOG', False))
LOG_PATH = os.getenv('LOG_PATH', None)
EVAL_MODE = str_to_bool(os.getenv('EVAL_MODE', False))
BASE_IMAGES = os.getenv('BASE_IMAGES', "tjbtech1/gaia-bookworm:v2")
BASE_IMAGES = os.getenv('BASE_IMAGES', None)
def get_architecture():
machine = platform.machine().lower()
if 'x86' in machine or 'amd64' in machine or 'i386' in machine:
return "tjbtech1/metachain:amd64_latest"
elif 'arm' in machine:
return "tjbtech1/metachain:latest"
else:
return "tjbtech1/metachain:latest"
if BASE_IMAGES is None:
BASE_IMAGES = get_architecture()
COMPLETION_MODEL = os.getenv('COMPLETION_MODEL', "claude-3-5-haiku-20241022")
EMBEDDING_MODEL = os.getenv('EMBEDDING_MODEL', "text-embedding-3-small")
MC_MODE = str_to_bool(os.getenv('MC_MODE', False))
MC_MODE = str_to_bool(os.getenv('MC_MODE', True))
# add Env for function call and non-function call
FN_CALL = str_to_bool(os.getenv('FN_CALL', True))
FN_CALL = str_to_bool(os.getenv('FN_CALL', None))
API_BASE_URL = os.getenv('API_BASE_URL', None)
ADD_USER = str_to_bool(os.getenv('ADD_USER', False))
ADD_USER = str_to_bool(os.getenv('ADD_USER', None))
NON_FN_CALL = str_to_bool(os.getenv('NON_FN_CALL', False))
NOT_SUPPORT_SENDER = ["mistral", "groq"]
MUST_ADD_USER = ["deepseek-reasoner", "o1-mini", "deepseek-r1"]
NOT_SUPPORT_FN_CALL = ["o1-mini", "deepseek-reasoner", "deepseek-r1", "llama", "grok-2"]
NOT_USE_FN_CALL = [ "deepseek-chat"] + NOT_SUPPORT_FN_CALL
if ADD_USER is None:
ADD_USER = False
for model in MUST_ADD_USER:
if model in COMPLETION_MODEL:
ADD_USER = True
break
if FN_CALL is None:
FN_CALL = True
for model in NOT_USE_FN_CALL:
if model in COMPLETION_MODEL:
FN_CALL = False
break
NON_FN_CALL = False
for model in NOT_SUPPORT_FN_CALL:
if model in COMPLETION_MODEL:
NON_FN_CALL = True
break
if EVAL_MODE:
DEFAULT_LOG = False
# print(FN_CALL, NON_FN_CALL, ADD_USER)

View File

@@ -1,34 +1,33 @@
from metachain.environment.docker_container import init_container
import argparse
from constant import DOCKER_WORKPLACE_NAME
from datasets import load_dataset
import huggingface_hub
from metachain import MetaChain
from metachain.logger import MetaChainLogger, LoggerManager
from autoagent import MetaChain
from autoagent.logger import MetaChainLogger, LoggerManager
from evaluation.utils import make_metadata, prepare_dataset, update_progress, check_port_available, run_evaluation, clean_msg
from evaluation.types import EvalMetadata, EvalOutput
import metachain.agents as agenthub
import autoagent.agents as agenthub
import os.path as osp
import pandas as pd
import asyncio
import re
import os
import shutil
from metachain.registry import registry
from autoagent.registry import registry
from evaluation.gaia.scorer import question_scorer
import json
# from metachain.util import run_command_in_container
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from metachain.types import Response
from metachain.util import function_to_json
from metachain.main import run_in_client, run_in_client_non_async
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from metachain.environment.utils import setup_metachain
# from autoagent.util import run_command_in_container
from autoagent.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from autoagent.environment.browser_env import BrowserEnv
from autoagent.environment.markdown_browser import RequestsMarkdownBrowser
from autoagent.types import Response
from autoagent.util import function_to_json
from autoagent.main import run_in_client, run_in_client_non_async
from autoagent.agents.meta_agent.tool_editor import get_tool_editor_agent
from autoagent.environment.utils import setup_metachain
import subprocess
DATASET_CACHE_DIR = osp.join(osp.dirname(__file__), 'data')
# Note: You should run this script in the root directory of the project metachain
# Note: You should run this script in the root directory of the project autoagent
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--container_name', type=str, default='gaia_test')

View File

@@ -1,235 +0,0 @@
from metachain.environment.docker_container import init_container
import argparse
from constant import DOCKER_WORKPLACE_NAME
from datasets import load_dataset
import huggingface_hub
from metachain import MetaChain
from metachain.logger import MetaChainLogger, LoggerManager
from evaluation.utils import make_metadata, prepare_dataset, update_progress, check_port_available, run_evaluation, clean_msg
from evaluation.types import EvalMetadata, EvalOutput
import metachain.agents as agenthub
import os.path as osp
import pandas as pd
import asyncio
import re
import os
import shutil
from metachain.registry import registry
from evaluation.gaia.scorer import question_scorer
import json
# from metachain.util import run_command_in_container
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from metachain.types import Response
from metachain.util import function_to_json
from metachain.main import run_in_client
import subprocess
DATASET_CACHE_DIR = osp.join(osp.dirname(__file__), 'data')
# Note: You should run this script in the root directory of the project metachain
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--container_name', type=str, default='gaia_test')
parser.add_argument('--model', type=str, default='gpt-4o-2024-08-06')
parser.add_argument('--git_clone', action='store_true', default=False)
parser.add_argument('--setup_package', type=str, default=None)
parser.add_argument('--test_pull_name', type=str, default='main')
parser.add_argument('--debug', action='store_true', default=False)
# metadata
parser.add_argument('--agent_func', type=str, default='get_system_triage_agent')
parser.add_argument('--eval_note', type=str, default=None)
parser.add_argument('--eval_output_dir', type=str, default='./evaluation_results')
parser.add_argument('--data_split', type=str, default=None)
# gaia level
parser.add_argument('--level', type=str, default='1')
parser.add_argument('--eval_n_limit', type=int, default=None)
parser.add_argument('--port', type=int, default=12345)
parser.add_argument('--eval_num_workers', type=int, default=1)
args = parser.parse_args()
return args
def get_config(metadata: EvalMetadata, instance_id: str):
container_name = metadata.container_name+f'_{instance_id}'
port_info = check_container_ports(container_name)
port = metadata.port
if port_info:
port = port_info[0]
else:
while not check_port_available(port):
port += 1
local_root = os.path.join(os.getcwd(), f"workspace_gaia_whole", f"gaia_eval_{instance_id}")
os.makedirs(local_root, exist_ok=True)
docker_config = DockerConfig(
workplace_name=DOCKER_WORKPLACE_NAME,
container_name=container_name,
communication_port=port,
conda_path='/root/miniconda3',
local_root=local_root,
)
return docker_config
def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    logger: MetaChainLogger,
) -> EvalOutput:
    """Run a single GAIA instance end-to-end and score the agent's answer.

    Args:
        instance: One GAIA row (uses 'instance_id', 'Question', 'file_name',
            'Final answer').
        metadata: Evaluation configuration (model, agent_func, output dir, ...).
        logger: Logger for this worker process.

    Returns:
        EvalOutput containing the full message trace and the scored test_result.

    Raises:
        ValueError: If the agent run returns no response.
    """
    docker_config = get_config(metadata, instance_id=instance['instance_id'])
    code_env, web_env, file_env = create_environment(docker_config)
    local_workplace = code_env.local_workplace
    docker_workplace = code_env.docker_workplace
    # Setup the logger properly, so you can run multi-processing to parallelize the evaluation
    logger.info(f'Starting evaluation for instance {instance["instance_id"]}.')
    # Copy the instance's attachment (if any) into the workspace as file.<ext>.
    if instance['file_name'] != '':
        assert metadata.data_split is not None
        src_file = os.path.join(
            DATASET_CACHE_DIR, '2023', metadata.data_split, instance['file_name']
        )
        assert os.path.exists(src_file)
        extension_name = instance['file_name'].split('.')[-1]
        dest_file = os.path.join(local_workplace, f'file.{extension_name}')
        shutil.copy(src_file, dest_file)
        file_name = dest_file.split('/')[-1]
    else:
        dest_file = None
    # Prepare instruction
    instruction = f"{instance['Question']}\n"
    logger.info(f'Instruction: {instruction}')
    if dest_file:
        instruction += f"\n\nThe mentioned file is provided in the workspace at: {osp.join(docker_workplace, file_name)}"
    instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n'
    instruction += 'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
    instruction += (
        'For example: The answer to the question is <solution> 42 </solution>.\n'
    )
    logger.info(f'Instruction:\n{instruction}')
    system_triage_agent = registry.agents[metadata.agent_func](model=metadata.model, file_env=file_env, web_env=web_env, code_env=code_env)
    messages = [
        {
            'role': 'user',
            'content': instruction
        }
    ]
    context_variables = {}
    # Run the agent (similar to the `main` function) and get the final task state.
    response: Response | None = asyncio.run(
        run_in_client(
            agent=system_triage_agent,
            messages=messages,
            context_variables = context_variables,
            logger=logger
        )
    )
    # BUGFIX: validate the response BEFORE dereferencing it. The original code
    # accessed response.messages first, which made this guard unreachable in
    # exactly the case it was meant to catch.
    if response is None:
        raise ValueError('Response should not be None.')
    messages.extend(response.messages)
    # save messages to a file
    messages_file = osp.join(metadata.eval_output_dir, f"gaia_{instance['instance_id']}", f'messages_{metadata.agent_func.replace("get_", "")}.json')
    os.makedirs(osp.dirname(messages_file), exist_ok=True)
    messages = clean_msg(messages)
    with open(messages_file, 'w', encoding='utf-8') as f:
        json.dump(messages, f, ensure_ascii=False, indent=4)
    model_answer_raw = response.messages[-1]['content']
    # attempt to parse the <solution>...</solution> span out of the final message
    model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw)
    if len(model_answer) == 0:
        logger.info(f'Failed to parse model answer: {model_answer_raw}', title='WARNING', color='yellow')
        model_answer = model_answer_raw
    else:
        model_answer = model_answer[0]
    logger.info(
        f'Final message: {model_answer} | Ground truth: {instance["Final answer"]}',
        title='INFO', color='green'
    )
    score = question_scorer(
        model_answer=model_answer, ground_truth=instance['Final answer']
    )
    test_result = {
        'score': score,
        'model_answer_raw': model_answer_raw,
        'model_answer': model_answer,
        'ground_truth': instance['Final answer'],
    }
    # Save the output
    output = EvalOutput(
        instance_id=instance['instance_id'],
        instance=instance.to_dict(),
        instruction=instance['Question'],
        metadata=metadata,
        messages=messages,
        test_result=test_result,
    )
    return output
def create_environment(docker_config: DockerConfig):
    """
    Create the three runtime environments used by one evaluation instance:
    1. create the code environment (Docker container)
    2. create the web environment (browser)
    3. create the file environment (markdown-based file browser)
    """
    code_env = DockerEnv(docker_config)
    code_env.init_container()  # ensure the container is running before the other envs use its workspace
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # downloads land under <local_root>/<workplace_name>/downloads
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def main(args):
    """Entry point: build eval metadata, download GAIA, and run the evaluation loop."""
    metadata: EvalMetadata = make_metadata(
        model=args.model,
        dataset_name="gaia",
        agent_func=args.agent_func,
        eval_note=args.eval_note,
        eval_output_dir=args.eval_output_dir,
        data_split=args.data_split,
        details={'gaia-level': args.level},
        port=args.port,
        container_name=args.container_name,
    )
    log_path = osp.join(metadata.eval_output_dir, 'logs', f'agent_{metadata.model}.log')
    LoggerManager.set_logger(MetaChainLogger(log_path))
    # Load the GAIA split and mirror the dataset (incl. attachment files) into the local cache.
    dataset = load_dataset('gaia-benchmark/GAIA', args.level)
    huggingface_hub.snapshot_download(
        'gaia-benchmark/GAIA',
        repo_type='dataset',
        local_dir=DATASET_CACHE_DIR,
    )
    # NOTE(review): --data_split defaults to None, and dataset[None] would fail
    # here — confirm callers always pass --data_split.
    gaia_tests = dataset[metadata.data_split].to_pandas()
    gaia_tests.rename(columns={'task_id': 'instance_id'}, inplace=True)
    output_file = osp.join(metadata.eval_output_dir, 'output.jsonl')
    # prepare_dataset applies the --eval_n_limit cap and resume-from-output logic
    prepared_dataset = prepare_dataset(gaia_tests, output_file, args.eval_n_limit)
    run_evaluation(
        dataset=prepared_dataset,
        metadata=metadata,
        output_file=output_file,
        num_workers=args.eval_num_workers,
        process_instance_func=process_instance,
    )
# Script entry point: parse CLI args and launch the GAIA evaluation.
if __name__ == "__main__":
    args = get_args()
    main(args)
# print(check_container_exist('gaia_lite_eval_c61d22de-5f6c-4958-a7f6-5e9707bd3466'))

View File

@@ -1,7 +1,7 @@
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from metachain.tools.files import create_file
from autoagent.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from autoagent.tools.files import create_file
if __name__ == "__main__":
import os

View File

@@ -8,7 +8,7 @@ import pandas as pd
import json
from typing import Callable, Awaitable
from tqdm import tqdm
from metachain.logger import MetaChainLogger, LoggerManager
from autoagent.logger import MetaChainLogger, LoggerManager
import multiprocessing as mp
import psutil
import traceback

View File

@@ -6,22 +6,24 @@ from rich.style import Style
from rich.console import Console
from rich.box import DOUBLE
from rich.markdown import Markdown
MC_LOGO = """\
███╗ ██╗███████╗████████╗ █████╗ ██████╗██╗ ██╗ █████╗ ██╗█████╗
████╗ ████║██╔════╚══██╔══██╔══██╗██╔════╝██║ ██║██╔══██╗██║████╗ ██║
██████╔██║████ ██║ ███████║██║ ██████████████║██║██╔██╗ ██║
██║╚██╔╝██║██╔══╝ ██║ ██╔══██║██║ ██╔══██║██╔══██║████║╚██╗██║
██║ ╚═╝ ██║███████╗ ██║ ██║ ██║╚██████╗██║ ██████║████║ ╚████║
╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝
╔═══ 𝒞𝓇𝑒𝒶𝓉𝑒 𝒜𝑔𝑒𝓃𝓉𝒾𝒸 𝒜 𝓊𝓈𝒾𝓃𝑔 𝒶𝓃𝑔𝓊𝒶𝑔𝑒 ═══╗
""".strip()
█████╗ ██╗ ██╗██████████████╗ █████╗ ██████╗ █████████ ██╗████████╗
██╔══██╗██║ ██║╚══██╔══╝██╔═══██╗██╔══██╗██╔════╝ ██╔════╝████╗ ██║╚══██╔══╝
███████║████ ██║ ██║ █████████║██║ ██████████████║ ██║
██╔══██║██║ ██║ ██║ ██║ ██║██╔══██║██║ ██║██╔══██║████║ ██║
██║ ██║██████╔╝ ██║ ╚██████╔╝██║ ██║╚██████╔╝█████████║████║ ██║
╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═╝
╔═══ 𝒞𝓇𝑒𝒶𝓉𝑒 𝒜𝑔𝑒𝓃𝓉𝒾𝒸 𝒜 𝓊𝓈𝒾𝓃𝑔 𝒶𝓃𝑔𝓊𝒶𝑔𝑒 ═══╗\
"""
version_table = Table(show_header=False, box=DOUBLE, expand=True)
version_table.add_column("Key", style="cyan")
version_table.add_column("Value", style="green")
version_table.add_row("Version", "0.1.0")
version_table.add_row("Author", "MetaChain Team@HKU")
version_table.add_row("Version", "0.2.0")
version_table.add_row("Author", "AutoAgent Team@HKU")
version_table.add_row("License", "MIT")
NOTES = """\
@@ -42,3 +44,4 @@ GOODBYE_LOGO = """\
· 𝓜𝓮𝓽𝓪𝓒𝓱𝓪𝓲𝓷-𝓐𝓘 ·
""".strip()

View File

@@ -1,63 +0,0 @@
import click
import importlib
from metachain import MetaChain
from metachain.util import debug_print
import asyncio
@click.group()
def cli():
    """Root Click command group for the metachain command-line interface."""
    pass
@cli.command()
@click.option('--model', default='gpt-4o-2024-08-06', help='the name of the model')
@click.option('--agent_func', default='get_dummy_agent', help='the function to get the agent')
@click.option('--query', default='...', help='the user query to the agent')
@click.argument('context_variables', nargs=-1)
def agent(model: str, agent_func: str, query: str, context_variables):
    """
    Run an agent with a given model, agent function, query, and context variables.

    Args:
        model (str): The name of the model.
        agent_func (str): The function to get the agent.
        query (str): The user query to the agent.
        context_variables (list): The context variables to pass to the agent,
            given as KEY=VALUE tokens.

    Usage:
        mc agent --model=gpt-4o-2024-08-06 --agent_func=get_weather_agent --query="What is the weather in Tokyo?" city=Tokyo unit=C timestamp=2024-01-01
    """
    # Parse KEY=VALUE tokens into the context dict (split on the first '=' only).
    context_storage = {}
    for arg in context_variables:
        if '=' in arg:
            key, value = arg.split('=', 1)
            context_storage[key] = value
    agent_module = importlib.import_module('metachain.agents')
    try:
        # renamed from `agent_func` to avoid shadowing the CLI parameter
        agent_factory = getattr(agent_module, agent_func)
    except AttributeError:
        # BUGFIX: corrected typo "shoud" -> "should" in the user-facing error
        raise ValueError(f'Agent function {agent_func} not found, you should check in the `metachain.agents` directory for the correct function name')
    # renamed from `agent` to avoid shadowing this command function
    agent_instance = agent_factory(model)
    mc = MetaChain()
    messages = [
        {"role": "user", "content": query}
    ]
    response = mc.run(agent_instance, messages, context_storage, debug=True)
    debug_print(True, response.messages[-1]['content'], title = f'Result of running {agent_instance.name} agent', color = 'pink3')
    return response.messages[-1]['content']
@cli.command()
@click.option('--workflow_name', default=None, help='the name of the workflow')
@click.option('--system_input', default='...', help='the user query to the agent')
def workflow(workflow_name: str, system_input: str):
    """Synchronous CLI wrapper around the async workflow runner."""
    return asyncio.run(async_workflow(workflow_name, system_input))
async def async_workflow(workflow_name: str, system_input: str):
    """Asynchronous implementation of the `workflow` command: resolve the named
    workflow in `metachain.workflows`, await it with *system_input*, and print
    the result."""
    workflow_module = importlib.import_module(f'metachain.workflows')
    try:
        workflow_func = getattr(workflow_module, workflow_name)
    except AttributeError:
        raise ValueError(f'Workflow function {workflow_name} not found...')
    result = await workflow_func(system_input)  # await the async workflow to completion
    debug_print(True, result, title=f'Result of running {workflow_name} workflow', color='pink3')
    return result

View File

@@ -1,207 +0,0 @@
from metachain.registry import registry
from metachain.environment import LocalEnv, DockerEnv
from typing import Union
from metachain.tools.terminal_tools import (
create_file,
create_directory,
run_python,
)
from metachain.registry import register_tool
from metachain.io_utils import print_stream
import json
def get_metachain_path(env: Union[LocalEnv, DockerEnv]) -> str:
    """Return the editable-install location of the `metachain` package inside *env*.

    Runs `pip show metachain` and parses the 'Editable project location:' line.
    Raises Exception when the command fails or the package is not installed in
    editable (-e) mode.
    """
    result = env.run_command('pip show metachain')
    if result['status'] != 0:
        raise Exception("Failed to list tools. Error: " + result['result'])
    stdout = result['result']
    for line in stdout.split('\n'):
        if line.startswith('Editable project location:'):
            path = line.split(':', 1)[1].strip()
            return path
    raise Exception("Failed to list tools. The MetaChain is not installed in editable mode.")
@register_tool("list_tools")
def list_tools(context_variables):
    """
    List all plugin tools in the MetaChain.

    Returns:
        A JSON string listing information for all plugin tools (name, args,
        docstring, body, return_type, file_path), or an error-description
        string on failure.
    """
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    try:
        path = get_metachain_path(env)
    except Exception as e:
        return "Failed to list tools. Error: " + str(e)
    # Dump the registry as JSON between sentinel markers so it can be extracted
    # from arbitrary surrounding command output.
    python_code = '"from metachain.registry import registry; import json; print(\\"TOOL_LIST_START\\"); print(json.dumps(registry.display_plugin_tools_info, indent=4)); print(\\"TOOL_LIST_END\\")"'
    list_tools_cmd = f"cd {path} && DEFAULT_LOG=False python -c {python_code}"
    result = env.run_command(list_tools_cmd)
    if result['status'] != 0:
        return "Failed to list tools. Error: " + result['result']
    try:
        output = result['result']
        start_marker = "TOOL_LIST_START"
        end_marker = "TOOL_LIST_END"
        # BUGFIX: check find() results BEFORE offsetting by len(start_marker) —
        # the original added the offset first, so a missing start marker
        # (find() == -1) could never equal -1 and the guard was unreachable.
        marker_pos = output.find(start_marker)
        end_idx = output.find(end_marker)
        if marker_pos == -1 or end_idx == -1:
            return "Failed to parse tool list: markers not found"
        start_idx = marker_pos + len(start_marker)
        json_str = output[start_idx:end_idx].strip()
        return json_str
    except Exception as e:
        return f"Failed to process output: {str(e)}"
@register_tool("create_tool")
def create_tool(tool_name: str, tool_code: str, context_variables):
    """
    Create a plugin tool.

    Args:
        tool_name: The name of the tool.
        tool_code: The code of creating the tool. (You should strictly follow the format of the template given to you to create the tool.)

    Returns:
        A string representation of the result of the tool creation.
    """
    # protected built-in tool: never allow it to be overwritten
    if tool_name == "visual_question_answering":
        return "The tool `visual_question_answering` is not allowed to be modified."
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    try:
        path = get_metachain_path(env)
    except Exception as e:
        return "Failed to list tools. Error: " + str(e)
    # plugin tools live under <editable install root>/metachain/tools
    tools_dir = path + "/metachain/tools"
    try:
        msg = create_file(tools_dir + "/" + tool_name + ".py", tool_code, context_variables)
        if msg.startswith("Error creating file:"):
            return "Failed to create tool. Error: " + msg
        return "Successfully created tool: " + tool_name + " in " + tools_dir + "/" + tool_name + ".py"
    except Exception as e:
        return "Failed to create tool. Error: " + str(e)
def tool_exists(tool_name: str, context_variables):
    """Check whether *tool_name* is a registered plugin tool.

    Returns (exists: bool, tool_dict) on success; on failure returns
    (error_message: str, None) — callers must check the first element's type.
    """
    try:
        list_res = list_tools(context_variables)
        tool_dict = json.loads(list_res)
        if tool_name not in tool_dict.keys():
            return False, tool_dict
        return True, tool_dict
    except Exception as e:
        return "Before deleting a tool, you should list all tools first. But the following error occurred: " + str(e), None
@register_tool("delete_tool")
def delete_tool(tool_name: str, context_variables):
    """
    Delete a plugin tool.

    Args:
        tool_name: The name of the tool to be deleted.

    Returns:
        A string representation of the result of the tool deletion.
    """
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    # BUGFIX: list_tools() may return an error-description string instead of
    # JSON; the original called json.loads outside any try block, so that case
    # raised an uncaught JSONDecodeError instead of reporting the failure.
    list_res = list_tools(context_variables)
    try:
        tool_dict = json.loads(list_res)
    except json.JSONDecodeError:
        return "Before deleting a tool, you should list all tools first. But listing tools failed with: " + list_res
    try:
        tool_path = tool_dict[tool_name]['file_path']
    except KeyError:
        return "The tool `" + tool_name + "` does not exist."
    except Exception as e:
        return "Error: " + str(e)
    try:
        result = env.run_command(f"rm {tool_path}")
        if result['status'] != 0:
            return f"Failed to delete tool: `{tool_name}`. Error: " + result['result']
        return f"Successfully deleted tool: `{tool_name}`."
    except Exception as e:
        return f"Failed to delete tool: `{tool_name}`. Error: " + str(e)
@register_tool("update_tool")
def update_tool(tool_name: str, tool_code: str, context_variables):
    """
    Update an existing plugin tool.

    Args:
        tool_name: The name of the tool to be updated.
        tool_code: The code of the tool to be updated.

    Returns:
        A string representation of the result of the tool update.
    """
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    # BUGFIX: list_tools() may return an error-description string instead of
    # JSON; the original called json.loads outside any try block, so that case
    # raised an uncaught JSONDecodeError instead of reporting the failure.
    list_res = list_tools(context_variables)
    try:
        tool_dict = json.loads(list_res)
    except json.JSONDecodeError:
        return "Before updating a tool, you should list all tools first. But listing tools failed with: " + list_res
    try:
        tool_path = tool_dict[tool_name]['file_path']
    except KeyError:
        return "The tool `" + tool_name + "` does not exist."
    except Exception as e:
        return "Error: " + str(e)
    try:
        msg = create_file(tool_path, tool_code, context_variables)
        if msg.startswith("Error creating file:"):
            return "Failed to update tool. Error: " + msg
        return "Successfully updated tool: " + tool_name + " in " + tool_path
    except Exception as e:
        return "Failed to update tool. Error: " + str(e)
@register_tool("test_tool")
def test_tool(tool_name: str, test_code: str, context_variables):
    """
    Write *test_code* into <metachain>/test_tools/test_<tool_name>.py and run it.

    Args:
        tool_name: The name of the plugin tool under test.
        test_code: A standalone Python test script for the tool.

    Returns:
        The test run's output on success, or an error-description string.
    """
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    try:
        path = get_metachain_path(env)
    except Exception as e:
        return "Failed to get the path of the MetaChain. Error: " + str(e)
    # test scripts are collected in a dedicated directory next to the package
    test_dir = path + "/test_tools"
    try:
        msg = create_directory(test_dir, context_variables)
        if msg.startswith("Error creating directory:"):
            return "Failed to create the test directory. Error: " + msg
    except Exception as e:
        return "Failed to create the test directory. Error: " + str(e)
    test_file_path = test_dir + "/" + "test_" + tool_name + ".py"
    try:
        msg = create_file(test_file_path, test_code, context_variables)
        if msg.startswith("Error creating file:"):
            return "Failed to create the test file. Error: " + msg
    except Exception as e:
        return "Failed to create the test file. Error: " + str(e)
    try:
        # DEFAULT_LOG=False keeps framework logging out of the captured output
        result = run_python(context_variables, test_file_path, cwd=path, env_vars={"DEFAULT_LOG": "False"})
        if "Exit code: 0" not in result:
            return "Failed to test the tool. The test case is not correct. The result is: " + result
        return f"The result is of the tool `{tool_name}`: \n{result}"
    except Exception as e:
        return "Failed to test the tool. Error: " + str(e)
if __name__ == "__main__":
    # Ad-hoc smoke test: list tools with an empty context (falls back to LocalEnv).
    print(list_tools({}))

View File

View File

@@ -1,262 +0,0 @@
from constant import DOCKER_WORKPLACE_NAME
from metachain.io_utils import read_yaml_file, get_md5_hash_bytext, read_file
from metachain.environment.utils import setup_metachain
from metachain.types import Response
from metachain import MetaChain
from metachain.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.progress import Progress, SpinnerColumn, TextColumn
import json
import argparse
from datetime import datetime
from metachain.agents.meta_agent import tool_editor, agent_editor
from metachain.tools.meta.edit_tools import list_tools
from metachain.tools.meta.edit_agents import list_agents
from loop_utils.font_page import MC_LOGO, version_table, NOTES, GOODBYE_LOGO
from rich.live import Live
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
import os
import os.path as osp
from metachain.agents import get_system_triage_agent
from metachain.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
import re
from playground.cli.metachain_meta_agent import meta_agent
from playground.cli.metachain_meta_workflow import meta_workflow
from playground.cli.file_select import select_and_copy_files
def get_args():
    """Parse the CLI options for the interactive MetaChain showcase."""
    p = argparse.ArgumentParser(description="working@tjb-tech")
    # container / model selection
    p.add_argument('--container_name', type=str, default='gpu_test')
    p.add_argument('--model', type=str, default='gpt-4o-2024-08-06')
    # repo / runtime options
    p.add_argument('--test_pull_name', type=str, default='test_pull_1010')
    p.add_argument('--debug', action='store_true', default=False)
    p.add_argument('--port', type=int, default=12350)
    p.add_argument('--git_clone', action='store_true', default=False)
    return p.parse_args()
def clear_screen():
    """Print a placeholder line, then restore the previously saved cursor
    position and erase everything below it (ANSI escapes; cursor re-shown)."""
    console = Console()
    console.print("[bold green]Coming soon...[/bold green]")
    print('\033[u\033[J\033[?25h', end='') # Restore cursor and clear everything after it, show cursor
def get_config(args):
    """Build the DockerConfig for the showcase container, allocating a free
    communication port (atomically across concurrent processes)."""
    container_name = args.container_name
    port_info = check_container_ports(container_name)
    port = args.port
    if port_info:
        # container already exists: reuse its published port
        port = port_info[0]
    else:
        # while not check_port_available(port):
        #     port += 1
        # use a file lock so port allocation is atomic across processes
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            port = args.port
            while not check_port_available(port):
                port += 1
                print(f'{port} is not available, trying {port+1}')
            # immediately mark this port as taken
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), f"workspace_meta_showcase", f"showcase_{container_name}")
    os.makedirs(local_root, exist_ok=True)
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
        git_clone=args.git_clone,
        test_pull_name=args.test_pull_name,
    )
    return docker_config
def create_environment(docker_config: DockerConfig):
    """
    Create the three runtime environments for the interactive session:
    1. create the code environment (Docker container)
    2. create the web environment (browser)
    3. create the file environment (markdown-based file browser)
    """
    code_env = DockerEnv(docker_config)
    code_env.init_container()  # ensure the container is running before the other envs use its workspace
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # downloads land under <local_root>/<workplace_name>/downloads
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def user_mode(model: str, context_variables: dict, debug: bool = True):
    """Interactive REPL: route each user query to the system triage agent or to
    an @-mentioned sub-agent; '@Upload_files' opens the file-upload picker;
    typing 'exit' quits."""
    logger = LoggerManager.get_logger()
    console = Console()
    system_triage_agent = get_system_triage_agent(model)
    assert system_triage_agent.agent_teams != {}, "System Triage Agent must have agent teams"
    messages = []
    agent = system_triage_agent
    # map of @-mentionable names (spaces replaced by underscores) to agents
    agents = {system_triage_agent.name.replace(' ', '_'): system_triage_agent}
    for agent_name in system_triage_agent.agent_teams.keys():
        agents[agent_name.replace(' ', '_')] = system_triage_agent.agent_teams[agent_name]("placeholder").agent
    agents["Upload_files"] = "select"  # sentinel: triggers the file picker instead of an agent run
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # create the interactive prompt session with @-mention completion
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    while True:
        # query = ask_text("Tell me what you want to do:")
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            # logger.info('User mode completed. See you next time! :waving_hand:', color='green', title='EXIT')
            logo_text = "User mode completed. See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        words = query.split()
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        # switch the active agent when the query @-mentions a known agent name
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                # print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                # print(word, end=' ')
                pass
        print()
        if hasattr(agent, "name"):
            agent_name = agent.name
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
            messages.append({"role": "user", "content": query})
            response = client.run(agent, messages, context_variables, debug=debug)
            messages.extend(response.messages)
            model_answer_raw = response.messages[-1]['content']
            # attempt to parse model_answer
            if model_answer_raw.startswith('Case resolved'):
                model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
                if len(model_answer) == 0:
                    model_answer = model_answer_raw
                else:
                    model_answer = model_answer[0]
            else:
                model_answer = model_answer_raw
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
            # continue the conversation with whichever agent the run handed off to
            agent = response.agent
        elif agent == "select":
            # file-upload picker: copy chosen local files into the workspace
            code_env: DockerEnv = context_variables["code_env"]
            local_workplace = code_env.local_workplace
            files_dir = os.path.join(local_workplace, "files")
            os.makedirs(files_dir, exist_ok=True)
            select_and_copy_files(files_dir, console)
        else:
            console.print(f"[bold red]Unknown agent: {agent}[/bold red]")
def tool_to_table(tool_dict: dict):
    """Render the plugin-tool registry as a two-column rich Table."""
    table = Table(title="Tool List", show_lines=True)
    table.add_column("Tool Name")
    table.add_column("Description")
    for name, info in tool_dict.items():
        if name == "tool_dummy":  # internal placeholder, not shown to users
            continue
        table.add_row(name, info["docstring"])
    return table
def agent_to_table(agent_dict: dict):
    """Render the agent registry as a two-column rich Table."""
    table = Table(title="Agent List", show_lines=True)
    table.add_column("Agent Name")
    table.add_column("Description")
    for name, info in agent_dict.items():
        if name == "get_dummy_agent":  # internal placeholder, not shown to users
            continue
        table.add_row(name, info["docstring"])
    return table
def update_guidance(context_variables):
    """Print the welcome logo, version table, and notes panel (and build the
    tool/agent tables from the live registries)."""
    tool_dict = json.loads(list_tools(context_variables))
    # print(tool_dict)
    tool_table = tool_to_table(tool_dict)
    agent_dict = json.loads(list_agents(context_variables))
    agent_table = agent_to_table(agent_dict)
    console = Console()
    # NOTE(review): `columns` is built but never printed — a `console.print(columns)`
    # may be missing; confirm intent before removing this line.
    columns = Columns([tool_table, agent_table])
    # print the logo
    logo_text = Text(MC_LOGO, justify="center")
    console.print(Panel(logo_text, style="bold salmon1", expand=True))
    console.print(version_table)
    console.print(Panel(NOTES,title="Important Notes", expand=True))
def main(args):
    """Initialize the showcase environment behind a spinner, then loop over the
    mode-selection menu (user mode / agent editor / workflow editor / exit)."""
    print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True # the progress bar disappears once it completes
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(args)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        log_path = osp.join("casestudy_results", 'logs', f'agent_{args.container_name}_{args.model}.log')
        # NOTE(review): log_path is computed but MetaChainLogger is given
        # log_path=None, so nothing is written to that file — confirm intent.
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up metachain...[/cyan]\n")
        setup_metachain(workplace_name=docker_config.workplace_name, env=code_env)
    clear_screen()
    # console = Console()
    # console.clear()
    # print('\033[H\033[J')  # ANSI escape sequence to clear the screen
    # print('\033[3J\033[H\033[2J')
    # clear_screen()
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    # select the mode
    while True:
        update_guidance(context_variables)
        mode = single_select_menu(['user mode', 'agent editor', 'workflow editor', 'exit'], "Please select the mode:")
        match mode:
            case 'user mode':
                clear_screen()
                user_mode(args.model, context_variables, args.debug)
            case 'agent editor':
                clear_screen()
                meta_agent(args.model, context_variables, args.debug)
            case 'workflow editor':
                clear_screen()
                meta_workflow(args.model, context_variables, args.debug)
            case 'exit':
                console = Console()
                logo_text = Text(GOODBYE_LOGO, justify="center")
                console.print(Panel(logo_text, style="bold salmon1", expand=True))
                break
# Script entry point: parse CLI args and launch the interactive showcase.
if __name__ == "__main__":
    args = get_args()
    main(args)

View File

@@ -1,15 +0,0 @@
# Resolve this script's directory and run from the repository root (two levels up).
current_dir=$(dirname "$(readlink -f "$0")")
cd $current_dir
cd ../..
# Environment for the quick-start showcase container.
export DOCKER_WORKPLACE_NAME=workplace
export EVAL_MODE=True
export BASE_IMAGES=tjbtech1/gaia-bookworm:v2
export COMPLETION_MODEL=claude-3-5-haiku-20241022
# export COMPLETION_MODEL=gpt-4o-2024-08-06
# export API_BASE_URL = "http://localhost:8000/v1" # if vllm
export DEBUG=False
export MC_MODE=True
export AI_USER=tjb-tech
# Launch the interactive CLI against the quick_start container.
python playground/cli/metachain_cli.py --container_name quick_start --model ${COMPLETION_MODEL} --test_pull_name mirror_branch_0207 --debug --port 12345 --git_clone

View File

@@ -1,5 +1,5 @@
[metadata]
name = metachain
name = autoagent
version = 0.1.0
author = jiabintang
description = dynamic agentic framework.
@@ -10,8 +10,7 @@ license = MIT
[options]
package_dir =
= .
packages =
metachain
packages = find_namespace:
zip_safe = True
include_package_data = True
install_requires =
@@ -64,12 +63,16 @@ install_requires =
moviepy
faster_whisper
sentence_transformers
[options.packages.find]
where = .
include = autoagent*
python_requires = >=3.10
[options.entry_points]
console_scripts =
mc = metachain.cli:cli
auto = autoagent.cli:cli
[tool.autopep8]
max_line_length = 120
ignore = E501,W6