OpenHands-swe-agent/config.template.toml
Xingyao Wang d9a8b53bc2 feat: specialize CodeAct into micro agents by providing markdown files (#3511)
* update microagent name and update template.toml

* substitute actual micro_agent_name for prompt manager

* add python-frontmatter

* support micro agent in codeact

* add test cases

* add instruction from require env var

* add draft gh micro agent

* update poetry lock

* update poetry lock
2024-08-28 14:58:16 +00:00

###################### OpenHands Configuration Example ######################
#
# All settings have default values, so you only need to uncomment and
# modify what you want to change.
# The fields within each section are sorted in alphabetical order.
#
##############################################################################
#################################### Core ####################################
# General core configurations
##############################################################################
[core]
# API key for E2B
#e2b_api_key = ""
# Base path for the workspace
workspace_base = "./workspace"
# Cache directory path
#cache_dir = "/tmp/cache"
# Debugging enabled
#debug = false
# Disable color in terminal output
#disable_color = false
# Enable saving and restoring the session when run from CLI
#enable_cli_session = false
# File store path
#file_store_path = "/tmp/file_store"
# File store type
#file_store = "memory"
# List of allowed file extensions for uploads
#file_uploads_allowed_extensions = [".*"]
# Maximum file size for uploads, in megabytes
#file_uploads_max_file_size_mb = 0
# Maximum budget per task, 0.0 means no limit
#max_budget_per_task = 0.0
# Maximum number of iterations
#max_iterations = 100
# Path to mount the workspace in the sandbox
#workspace_mount_path_in_sandbox = "/workspace"
# Path to mount the workspace
#workspace_mount_path = ""
# Path to rewrite the workspace mount path to
#workspace_mount_rewrite = ""
# Run as openhands
#run_as_openhands = true
# Runtime environment
#runtime = "eventstream"
# Name of the default agent
#default_agent = "CodeActAgent"
# JWT secret for authentication
#jwt_secret = ""
# Restrict file types for file uploads
#file_uploads_restrict_file_types = false
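# Example (illustrative values): restrict uploads to a few text-based
# extensions and cap each file at 10 MB
#file_uploads_restrict_file_types = true
#file_uploads_allowed_extensions = [".py", ".md", ".txt"]
#file_uploads_max_file_size_mb = 10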

#################################### LLM #####################################
# Configuration for LLM models (group name starts with 'llm')
# Use 'llm' for the default LLM config
##############################################################################
[llm]
# AWS access key ID
#aws_access_key_id = ""
# AWS region name
#aws_region_name = ""
# AWS secret access key
#aws_secret_access_key = ""
# API key to use
api_key = "your-api-key"
# API base URL
#base_url = ""
# API version
#api_version = ""
# Cost per input token
#input_cost_per_token = 0.0
# Cost per output token
#output_cost_per_token = 0.0
# Custom LLM provider
#custom_llm_provider = ""
# Embedding API base URL
#embedding_base_url = ""
# Embedding deployment name
#embedding_deployment_name = ""
# Embedding model to use
embedding_model = ""
# Maximum number of characters in an observation's content
#max_message_chars = 10000
# Maximum number of input tokens
#max_input_tokens = 0
# Maximum number of output tokens
#max_output_tokens = 0
# Model to use
model = "gpt-4o"
# Number of retries to attempt
#num_retries = 5
# Retry maximum wait time
#retry_max_wait = 60
# Retry minimum wait time
#retry_min_wait = 3
# Retry multiplier for exponential backoff
#retry_multiplier = 2.0
# Drop any unmapped (unsupported) params without causing an exception
#drop_params = false
# Base URL for the OLLAMA API
#ollama_base_url = ""
# Temperature for the API
#temperature = 0.0
# Timeout for the API
#timeout = 0
# Top p for the API
#top_p = 0.5

[llm.gpt3]
# API key to use
api_key = "your-api-key"
# Model to use
model = "gpt-3.5"

#################################### Agent ###################################
# Configuration for agents (group name starts with 'agent')
# Use 'agent' for the default agent config; otherwise, the group name must be
# `agent.<agent_name>` (case-sensitive), e.g. agent.CodeActAgent
##############################################################################
[agent]
# Name of the micro agent to use for this agent
#micro_agent_name = ""
# Memory enabled
#memory_enabled = false
# Memory maximum threads
#memory_max_threads = 2
# LLM config group to use
#llm_config = 'llm'

[agent.RepoExplorerAgent]
# Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
# useful when an agent doesn't demand high quality but uses a lot of tokens
llm_config = 'gpt3'
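# Example (the micro agent name below is illustrative; it must match one of the
# markdown-defined micro agents shipped with the repository): specialize
# CodeActAgent with a micro agent while keeping the default LLM config
#[agent.CodeActAgent]
#micro_agent_name = "github"
#llm_config = 'llm'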

#################################### Sandbox #################################
# Configuration for the sandbox
##############################################################################
[sandbox]
# Sandbox timeout in seconds
#timeout = 120
# Sandbox user ID
#user_id = 1000
# Container image to use for the sandbox
#base_container_image = "nikolaik/python-nodejs:python3.11-nodejs22"
# Use host network
#use_host_network = false
# Enable auto linting after editing
#enable_auto_lint = false
# Whether to initialize plugins
#initialize_plugins = true
# Extra dependencies to install in the runtime image
#runtime_extra_deps = ""
# Environment variables to set at the launch of the runtime
#runtime_startup_env_vars = {}
# BrowserGym environment to use for evaluation
#browsergym_eval_env = ""

#################################### Security ################################
# Configuration for security features
##############################################################################
[security]
# Enable confirmation mode
#confirmation_mode = true
# The security analyzer to use
#security_analyzer = ""

#################################### Eval ####################################
# Configuration for evaluation; please refer to the specific evaluation
# plugin for the available options
##############################################################################