Merge pull request #158 from vvincent1234/feat/deepseek-r1
fix cutting message bug
.env.example | 12 ++++++++----
@@ -22,12 +22,16 @@ CHROME_PATH=
 CHROME_USER_DATA=
 CHROME_DEBUGGING_PORT=9222
 CHROME_DEBUGGING_HOST=localhost
-CHROME_PERSISTENT_SESSION=false # Set to true to keep browser open between AI tasks
+# Set to true to keep browser open between AI tasks
+CHROME_PERSISTENT_SESSION=false
 
 # Display settings
-RESOLUTION=1920x1080x24 # Format: WIDTHxHEIGHTxDEPTH
-RESOLUTION_WIDTH=1920 # Width in pixels
-RESOLUTION_HEIGHT=1080 # Height in pixels
+# Format: WIDTHxHEIGHTxDEPTH
+RESOLUTION=1920x1080x24
+# Width in pixels
+RESOLUTION_WIDTH=1920
+# Height in pixels
+RESOLUTION_HEIGHT=1080
 
 # VNC settings
 VNC_PASSWORD=youvncpassword
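Moving the comments onto their own lines matters because some .env consumers keep everything after the `=` as the value (docker compose's env_file handling is a common example), so an inline comment can leak into the value itself. A minimal sketch of the failure mode, assuming a strict consumer; parse_resolution is a hypothetical helper, not part of the repo:

# Hypothetical helper illustrating why inline comments are risky in .env files:
# a loader that keeps everything after "=" would read
#   RESOLUTION=1920x1080x24 # Format: WIDTHxHEIGHTxDEPTH
# as the value "1920x1080x24 # Format: WIDTHxHEIGHTxDEPTH" and break parsing.
def parse_resolution(value: str) -> tuple[int, int, int]:
    # Expects the clean WIDTHxHEIGHTxDEPTH form from the new .env layout.
    width, height, depth = (int(part) for part in value.split("x"))
    return width, height, depth

print(parse_resolution("1920x1080x24"))  # (1920, 1080, 24)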
@@ -89,7 +89,8 @@ class CustomAgent(Agent):
             max_actions_per_step=max_actions_per_step,
             tool_call_in_content=tool_call_in_content,
         )
-        if self.llm.model_name in ["deepseek-reasoner"]:
+        if hasattr(self.llm, 'model_name') and self.llm.model_name in ["deepseek-reasoner"]:
             # deepseek-reasoner does not support function calling
             self.use_function_calling = False
+            # TODO: deepseek-reasoner only supports 64000 context
             self.max_input_tokens = 64000
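The hasattr guard matters because the agent can be handed any LangChain chat model, and not all of them expose a model_name attribute (some use model instead), so comparing unconditionally can raise AttributeError. A minimal sketch of the pattern; StubLLM and supports_function_calling are illustrative names, not the repo's:

# Illustrative sketch of the guard pattern above. StubLLM stands in for a
# chat model object that exposes `model` rather than `model_name`.
class StubLLM:
    model = "deepseek-reasoner"

def supports_function_calling(llm) -> bool:
    # deepseek-reasoner exposes no function-calling API, so the agent must
    # fall back to plain-text prompting when it is detected.
    name = getattr(llm, "model_name", None) or getattr(llm, "model", None)
    return name not in ["deepseek-reasoner"]

print(supports_function_calling(StubLLM()))  # False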
@@ -242,6 +243,7 @@ class CustomAgent(Agent):
             model_output.action, self.browser_context
         )
         if len(result) != len(model_output.action):
             # something changed mid-step; the LLM should know about it
             for ri in range(len(result), len(model_output.action)):
                 result.append(ActionResult(extracted_content=None,
+                                           include_in_memory=True,
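For context, the surrounding block pads the result list when the page changes mid-step and the controller runs fewer actions than the model planned, so result[i] stays aligned with the planned action i. A self-contained sketch under that reading; the ActionResult here is a simplified stand-in for the library's real class:

# Hedged sketch of the padding logic. Field names mirror the diff;
# everything else is illustrative.
from dataclasses import dataclass
from typing import Optional

@dataclass
class ActionResult:
    extracted_content: Optional[str] = None
    include_in_memory: bool = False
    error: Optional[str] = None

def pad_results(results: list, planned_actions: list) -> list:
    # Later actions never ran, so append error results for them and keep
    # the padding in memory so the LLM sees what went wrong.
    for i in range(len(results), len(planned_actions)):
        results.append(ActionResult(
            extracted_content=None,
            include_in_memory=True,
            error=f"action {i} was not executed because the page changed",
        ))
    return results

print(len(pad_results([ActionResult()], [1, 2, 3])))  # 3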
@@ -88,11 +88,9 @@ class CustomMassageManager(MessageManager):
     def cut_messages(self):
         """Get current message list, potentially trimmed to max tokens"""
         diff = self.history.total_tokens - self.max_input_tokens
-        i = 1  # start from 1 to keep system message in history
-        while diff > 0 and i < len(self.history.messages):
-            self.history.remove_message(i)
+        while diff > 0 and len(self.history.messages) > 1:
+            self.history.remove_message(1)  # always remove the oldest one
             diff = self.history.total_tokens - self.max_input_tokens
-            i += 1
 
     def add_state_message(
         self,
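This is the cutting-message bug from the commit title. The old loop removed the message at index i and then incremented i, but removal shifts the remaining messages left, so the message that slides into position i is skipped on every iteration; the loop can also exit while the history is still over budget once i catches up with the shrinking list. Always removing index 1, the oldest non-system message, fixes both. A toy reproduction, with ToyHistory standing in for the real message history and each message costing one "token":

# Toy reproduction of the bug fixed above; ToyHistory is illustrative.
class ToyHistory:
    def __init__(self, messages):
        self.messages = list(messages)

    @property
    def total_tokens(self):
        return len(self.messages)  # one token per message, for simplicity

    def remove_message(self, i):
        self.messages.pop(i)

def cut_old(history, max_tokens):
    # Buggy: popping at i shifts the list left, then i += 1 skips the
    # message that slid into position i, so every other message survives.
    diff = history.total_tokens - max_tokens
    i = 1
    while diff > 0 and i < len(history.messages):
        history.remove_message(i)
        diff = history.total_tokens - max_tokens
        i += 1

def cut_new(history, max_tokens):
    # Fixed: keep dropping the oldest non-system message (index 1) until
    # the history fits, preserving the system message at index 0.
    diff = history.total_tokens - max_tokens
    while diff > 0 and len(history.messages) > 1:
        history.remove_message(1)
        diff = history.total_tokens - max_tokens

old = ToyHistory(["sys", "m1", "m2", "m3", "m4", "m5"])
cut_old(old, 2)
print(old.messages)  # ['sys', 'm2', 'm4'] - still over budget, wrong messages kept

new = ToyHistory(["sys", "m1", "m2", "m3", "m4", "m5"])
cut_new(new, 2)
print(new.messages)  # ['sys', 'm5'] - oldest messages trimmed first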
@@ -17,7 +17,7 @@ def default_config():
         "llm_temperature": 1.0,
         "llm_base_url": "",
         "llm_api_key": "",
-        "use_own_browser": os.getenv("CHROME_PERSISTENT_SESSION", False),
+        "use_own_browser": os.getenv("CHROME_PERSISTENT_SESSION", "false").lower() == "true",
        "keep_browser_open": False,
         "headless": False,
         "disable_security": True,
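The old default was subtly wrong because environment variables are strings: os.getenv returns the raw string whenever the variable is set, and any non-empty string, including "false", is truthy, so use_own_browser was True as soon as CHROME_PERSISTENT_SESSION existed at all. A quick demonstration:

import os

# Environment variables are strings, so a set-but-"false" value is truthy.
os.environ["CHROME_PERSISTENT_SESSION"] = "false"

old_value = os.getenv("CHROME_PERSISTENT_SESSION", False)
print(bool(old_value))  # True - the string "false" is truthy

new_value = os.getenv("CHROME_PERSISTENT_SESSION", "false").lower() == "true"
print(new_value)  # False - explicit comparison yields a real boolean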
webui.py | 2 --
@@ -34,8 +34,6 @@ from gradio.themes import Citrus, Default, Glass, Monochrome, Ocean, Origin, Soft
 from src.utils.default_config_settings import default_config, load_config_from_file, save_config_to_file, save_current_config, update_ui_from_config
 from src.utils.utils import update_model_dropdown, get_latest_files, capture_screenshot
 
-from dotenv import load_dotenv
-load_dotenv()
 
 # Global variables for persistence
 _global_browser = None