Merge pull request #730 from k-w-lee/improve-examples-ui

Improve UI Examples
Authored by Magnus Müller on 2025-02-15 07:30:40 -08:00, committed by GitHub
4 changed files with 159 additions and 45 deletions

examples/ui/README.md (new file)

@@ -0,0 +1,7 @@
# **User Interfaces of Browser-Use**
| **File Name** | **User Interface** | **Description** | **Example Usage** |
|------------------------|-------------------|-------------------------------------------|-------------------------------------------|
| `command_line.py`      | **Terminal**      | Runs the browser agent from the terminal via `--query`/`--provider` arguments. | `python command_line.py` |
| `gradio_demo.py` | **Gradio** | Provides a Gradio-based interactive UI. | `python gradio_demo.py` |
| `streamlit_demo.py` | **Streamlit** | Runs a Streamlit-based web interface. | `python -m streamlit run streamlit_demo.py` |
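
All three entry points call load_dotenv() and read the provider key from the environment, so the quickest setup is a .env file next to these scripts. A minimal preflight sketch is shown below; the file name preflight.py and the standalone check are illustrative assumptions, not part of this PR:

# preflight.py (hypothetical helper): confirm a provider key is available before launching a demo
import os
from dotenv import load_dotenv

load_dotenv()  # picks up OPENAI_API_KEY / ANTHROPIC_API_KEY from a local .env file
if not (os.getenv('OPENAI_API_KEY') or os.getenv('ANTHROPIC_API_KEY')):
    raise SystemExit('Set OPENAI_API_KEY or ANTHROPIC_API_KEY in .env before running the examples')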

examples/ui/command_line.py

@@ -1,72 +1,97 @@
"""
To Use It:
Example 1: Using OpenAI (default), with default task: 'go to reddit and search for posts about browser-use'
python command_line.py
Example 2: Using OpenAI with a Custom Query
python command_line.py --query "go to google and search for browser-use"
Example 3: Using Anthropic's Claude Model with a Custom Query
python command_line.py --query "find latest Python tutorials on Medium" --provider anthropic
"""
import os
import sys
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
import asyncio
# Ensure local repository (browser_use) is accessible
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
from browser_use import Agent
from browser_use.browser.browser import Browser, BrowserConfig
from browser_use.controller.service import Controller
load_dotenv()
def get_llm(provider: str):
if provider == 'anthropic':
from langchain_anthropic import ChatAnthropic
api_key = os.getenv("ANTHROPIC_API_KEY")
if not api_key:
raise ValueError("Error: ANTHROPIC_API_KEY is not set. Please provide a valid API key.")
return ChatAnthropic(
model_name='claude-3-5-sonnet-20240620', timeout=25, stop=None, temperature=0.0
)
elif provider == 'openai':
from langchain_openai import ChatOpenAI
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("Error: OPENAI_API_KEY is not set. Please provide a valid API key.")
return ChatOpenAI(model='gpt-4o', temperature=0.0)
else:
raise ValueError(f'Unsupported provider: {provider}')
def parse_arguments():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description="Automate browser tasks using an LLM agent.")
parser.add_argument(
'--query',
type=str,
help='The query to process',
default='go to reddit and search for posts about browser-use'
)
parser.add_argument(
'--provider',
type=str,
choices=['openai', 'anthropic'],
default='openai',
help='The model provider to use (default: openai)',
)
return parser.parse_args()
task = 'go to reddit and search for post about brower-use '
controller = Controller()
parser = argparse.ArgumentParser()
parser.add_argument('--query', type=str, help='The query to process', default=task)
parser.add_argument(
'--provider',
type=str,
choices=['openai', 'anthropic'],
default='openai',
help='The model provider to use (default: openai)',
)
args = parser.parse_args()
llm = get_llm(args.provider)
browser = Browser(
config=BrowserConfig(
# chrome_instance_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
)
)
agent = Agent(
task=args.query,
llm=llm,
controller=controller,
browser=browser,
use_vision=True,
max_actions_per_step=1,
)
def initialize_agent(query: str, provider: str):
"""Initialize the browser agent with the given query and provider."""
llm = get_llm(provider)
controller = Controller()
browser = Browser(config=BrowserConfig())
return Agent(
task=query,
llm=llm,
controller=controller,
browser=browser,
use_vision=True,
max_actions_per_step=1,
), browser
async def main():
await agent.run(max_steps=25)
"""Main async function to run the agent."""
args = parse_arguments()
agent, browser = initialize_agent(args.query, args.provider)
input('Press Enter to close the browser...')
await browser.close()
await agent.run(max_steps=25)
input('Press Enter to close the browser...')
await browser.close()
asyncio.run(main())
if __name__ == "__main__":
asyncio.run(main())
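
Because the refactored script now guards its entry point with if __name__ == "__main__":, its helpers can also be imported and reused. A rough sketch, assuming it is executed from the same examples/ui directory as command_line.py; the query string is only an example:

import asyncio

from command_line import initialize_agent  # assumes examples/ui is the working directory


async def demo():
    # Build the agent/browser pair with the helper introduced in this PR, then run it
    agent, browser = initialize_agent('go to google and search for browser-use', 'openai')
    await agent.run(max_steps=25)
    await browser.close()


asyncio.run(demo())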

examples/ui/gradio_demo.py

@@ -1,8 +1,9 @@
import os
import asyncio
from dataclasses import dataclass
from typing import List, Optional
# Third-party imports
import gradio as gr
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
@@ -10,6 +11,7 @@ from rich.console import Console
from rich.panel import Panel
from rich.text import Text
# Local module imports
from browser_use import Agent
load_dotenv()

examples/ui/streamlit_demo.py (new file)

@@ -0,0 +1,80 @@
"""
To use it, you'll need to install streamlit, and run with:
python -m streamlit run streamlit_demo.py
"""
import os
import sys
import asyncio
import streamlit as st
from dotenv import load_dotenv
# Ensure local repository (browser_use) is accessible
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from browser_use import Agent
from browser_use.browser.browser import Browser, BrowserConfig
from browser_use.controller.service import Controller
# Load environment variables
load_dotenv()
# Function to get the LLM based on provider
def get_llm(provider: str):
if provider == 'anthropic':
from langchain_anthropic import ChatAnthropic
api_key = os.getenv("ANTHROPIC_API_KEY")
if not api_key:
st.error("Error: ANTHROPIC_API_KEY is not set. Please provide a valid API key.")
st.stop()
return ChatAnthropic(
model_name='claude-3-5-sonnet-20240620', timeout=25, stop=None, temperature=0.0
)
elif provider == 'openai':
from langchain_openai import ChatOpenAI
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
st.error("Error: OPENAI_API_KEY is not set. Please provide a valid API key.")
st.stop()
return ChatOpenAI(model='gpt-4o', temperature=0.0)
else:
st.error(f'Unsupported provider: {provider}')
st.stop()
# Function to initialize the agent
def initialize_agent(query: str, provider: str):
llm = get_llm(provider)
controller = Controller()
browser = Browser(config=BrowserConfig())
return Agent(
task=query,
llm=llm,
controller=controller,
browser=browser,
use_vision=True,
max_actions_per_step=1,
), browser
# Streamlit UI
st.title("Automated Browser Agent with LLMs 🤖")
query = st.text_input("Enter your query:", "go to reddit and search for posts about browser-use")
provider = st.radio("Select LLM Provider:", ["openai", "anthropic"], index=0)
if st.button("Run Agent"):
st.write("Initializing agent...")
agent, browser = initialize_agent(query, provider)
async def run_agent():
with st.spinner("Running automation..."):
await agent.run(max_steps=25)
st.success("Task completed! 🎉")
asyncio.run(run_agent())
st.button("Close Browser", on_click=lambda: asyncio.run(browser.close()))