update langchain examples

dexhorthy
2025-01-19 18:10:40 -08:00
parent ed8e894890
commit 745961dd70
4 changed files with 81 additions and 57 deletions

View File

@@ -3,40 +3,53 @@
# with no approvals required
#
from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain.agents import AgentType, initialize_agent
from langchain.tools import tool
import langchain.tools
from langchain.agents import AgentExecutor, create_tool_calling_agent
load_dotenv()
@tool
def add(x: int, y: int) -> int:
    """Add two numbers together."""
    return x + y
@tool
def multiply(x: int, y: int) -> int:
    """multiply two numbers"""
    return x * y
tools = [add.as_tool(), multiply.as_tool()]
tools = [
    langchain.tools.StructuredTool.from_function(add),
    langchain.tools.StructuredTool.from_function(multiply),
]
llm = ChatOpenAI(model="gpt-4o", temperature=0)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.OPENAI_FUNCTIONS,
    verbose=True,
    handle_parsing_errors=True,
)
# Prompt for creating Tool Calling Agent
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant.",
        ),
        ("placeholder", "{chat_history}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)
# Construct the Tool Calling Agent
agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
def main():
    result = agent.run("multiply 2 and 5, then add 32 to the result")
def main() -> None:
    result = agent_executor.invoke({"input": "multiply 2 and 5, then add 32 to the result"})
    print("\n\n----------Result----------\n\n")
    print(result)
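For reference, a minimal sketch of the migrated example with the new lines from this hunk assembled into one file. It assumes the @tool decorators and the initialize_agent block belong to the removed side of the diff, and the trailing __main__ guard is assumed rather than shown in the hunk.

# Sketch assembled from the added lines above; not a verbatim copy of the file.
from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
import langchain.tools
from langchain.agents import AgentExecutor, create_tool_calling_agent

load_dotenv()


def add(x: int, y: int) -> int:
    """Add two numbers together."""
    return x + y


def multiply(x: int, y: int) -> int:
    """multiply two numbers"""
    return x * y


# Plain functions wrapped as structured tools; schemas are inferred from the
# type hints and docstrings.
tools = [
    langchain.tools.StructuredTool.from_function(add),
    langchain.tools.StructuredTool.from_function(multiply),
]

llm = ChatOpenAI(model="gpt-4o", temperature=0)

# Prompt for creating Tool Calling Agent
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("placeholder", "{chat_history}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)

# Construct the Tool Calling Agent and wrap it in an executor
agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)


def main() -> None:
    result = agent_executor.invoke({"input": "multiply 2 and 5, then add 32 to the result"})
    print("\n\n----------Result----------\n\n")
    print(result)


if __name__ == "__main__":  # assumed guard, not shown in the hunk
    main()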

View File

@@ -5,11 +5,12 @@
# use local / CLI mode
#
from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
import langchain.tools
from humanlayer import HumanLayer
from langchain.agents import AgentType, initialize_agent
from langchain.tools import tool
from langchain.agents import AgentExecutor, create_tool_calling_agent
load_dotenv()
@@ -20,33 +21,43 @@ hl = HumanLayer(
)
@tool
def add(x: int, y: int) -> int:
    """Add two numbers together."""
    return x + y
@tool
@hl.require_approval()
def multiply(x: int, y: int) -> int:
    """multiply two numbers"""
    return x * y
tools = [add.as_tool(), multiply.as_tool()]
tools = [
    langchain.tools.StructuredTool.from_function(add),
    langchain.tools.StructuredTool.from_function(multiply),
]
llm = ChatOpenAI(model="gpt-4o", temperature=0)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.OPENAI_FUNCTIONS,
    verbose=True,
    handle_parsing_errors=True,
)
llm = ChatOpenAI(model="gpt-4", temperature=0)
# Prompt for creating Tool Calling Agent
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant.",
        ),
        ("placeholder", "{chat_history}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)
# Construct the Tool Calling Agent
agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
def main():
    result = agent.run("multiply 2 and 5, then add 32 to the result")
def main() -> None:
    result = agent_executor.invoke({"input": "multiply 2 and 5, then add 32 to the result"})
    print("\n\n----------Result----------\n\n")
    print(result)
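This example differs from the first mainly in the HumanLayer approval gate on multiply. A minimal sketch of just that piece, assuming a default no-argument HumanLayer() construction (the real keyword arguments are elided in this hunk):

# Sketch of the approval-gated tool only; construction details are assumptions.
from humanlayer import HumanLayer
import langchain.tools

hl = HumanLayer()  # assumed no-arg construction; actual kwargs are not shown in this hunk


# require_approval() wraps the function so each call is held until a human
# approves or rejects it; the wrapped function is then exposed to the agent
# as an ordinary structured tool.
@hl.require_approval()
def multiply(x: int, y: int) -> int:
    """multiply two numbers"""
    return x * y


tools = [langchain.tools.StructuredTool.from_function(multiply)]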

View File

@@ -5,10 +5,11 @@
#
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
import langchain.tools
from humanlayer.core.approval import HumanLayer
from langchain.agents import AgentType, initialize_agent
from langchain.tools import tool
load_dotenv()
@@ -19,33 +20,45 @@ hl = HumanLayer(
)
@tool
def add(x: int, y: int) -> int:
    """Add two numbers together."""
    return x + y
@tool
@hl.require_approval()
def multiply(x: int, y: int) -> int:
    """multiply two numbers"""
    return x * y
tools = [add.as_tool(), multiply.as_tool()]
tools = [
    langchain.tools.StructuredTool.from_function(add),
    langchain.tools.StructuredTool.from_function(multiply),
]
llm = ChatOpenAI(model="gpt-4o", temperature=0)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.OPENAI_FUNCTIONS,
    verbose=True,
    handle_parsing_errors=True,
)
# Prompt for creating Tool Calling Agent
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant.",
        ),
        ("placeholder", "{chat_history}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)
# Construct the Tool Calling Agent
agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
def main():
    result = agent.run("multiply 2 and 5, then add 32 to the result")
def main() -> None:
    result = agent_executor.invoke({"input": "multiply 2 and 5, then add 32 to the result"})
    print("\n\n----------Result----------\n\n")
    print(result)

View File

@@ -22,21 +22,6 @@
"typescript": "^5.5.2"
}
},
"../../humanlayer-ts": {
"name": "humanlayer",
"version": "0.7.1",
"license": "Apache-2.0",
"devDependencies": {
"@types/jest": "^29.5.14",
"@types/node": "^20.17.11",
"eslint": "^8.57.0",
"jest": "^29.7.0",
"prettier": "^3.3.2",
"ts-jest": "^29.1.5",
"ts-node": "^10.9.2",
"typescript": "^5.5.2"
}
},
"node_modules/@cspotcode/source-map-support": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
@@ -1249,8 +1234,10 @@
      }
    },
    "node_modules/humanlayer": {
      "resolved": "../../humanlayer-ts",
      "link": true
      "version": "0.7.1",
      "resolved": "https://registry.npmjs.org/humanlayer/-/humanlayer-0.7.1.tgz",
      "integrity": "sha512-/TffEouVEGxXBNcxC3KNMsoEsIBgUnVQzInhKRKvKLbVtbF3dJjJOVF4tojlLsUOu9vy/RDs7U3x8dtE/t4teQ==",
      "license": "Apache-2.0"
    },
    "node_modules/iconv-lite": {
      "version": "0.4.24",