Mirror of https://github.com/humanlayer/humanlayer.git (synced 2025-08-20 19:01:22 +03:00)
thanks codebuff
This commit is contained in:
Makefile (43 changed lines)
@@ -320,11 +320,11 @@ release-rc: _check-uv-publish-token release-plan
 : confirming release plan
 @read -p "Press Enter to continue..."
 @echo "Releasing..."
-# cd humanlayer-ts && npm run build && npm publish
-# @$(MAKE) update-examples-ts-versions VERSION=$(current-ts-version)
-# @$(MAKE) smoke-test-examples-ts
-# @$(MAKE) build-and-publish
-# @$(MAKE) update-examples-versions VERSION=$(current-py-version)
+cd humanlayer-ts && npm run build && npm publish
+@$(MAKE) update-examples-ts-versions VERSION=$(current-ts-version)
+@$(MAKE) smoke-test-examples-ts
+@$(MAKE) build-and-publish
+@$(MAKE) update-examples-versions VERSION=$(current-py-version)
 @$(MAKE) smoke-test-examples-py
 
 @echo "RC tested against staging, to proceed, update env vars to point at production"
@@ -340,20 +340,20 @@ release-rc: _check-uv-publish-token release-plan
 release-and-test-prod: _release-plan-versions _release-branch-check _production-env-check
 @echo "Releasing..."
 @echo "Publish TypeScript:"
-#sed -i '' 's/$(current-ts-version)/$(new-version)/' humanlayer-ts/package.json
-#cat humanlayer-ts/package.json | grep version
-#@read -p "Press Enter to continue..."
-#cd humanlayer-ts && npm publish
-#@$(MAKE) update-examples-ts-versions VERSION=$(new-version)
-#@$(MAKE) smoke-test-examples-ts
+sed -i '' 's/$(current-ts-version)/$(new-version)/' humanlayer-ts/package.json
+cat humanlayer-ts/package.json | grep version
+@read -p "Press Enter to continue..."
+cd humanlayer-ts && npm publish
+@$(MAKE) update-examples-ts-versions VERSION=$(new-version)
+@$(MAKE) smoke-test-examples-ts
 
-#@echo "Publish Python:"
-#sed -i '' 's/$(current-py-version)/$(new-version)/' pyproject.toml
-#cat pyproject.toml | grep version
-#@read -p "Press Enter to continue..."
-#@$(MAKE) build-and-publish
-#@$(MAKE) update-examples-versions VERSION=$(new-version)
-#@$(MAKE) smoke-test-examples-py
+@echo "Publish Python:"
+sed -i '' 's/$(current-py-version)/$(new-version)/' pyproject.toml
+cat pyproject.toml | grep version
+@read -p "Press Enter to continue..."
+@$(MAKE) build-and-publish
+@$(MAKE) update-examples-versions VERSION=$(new-version)
+@$(MAKE) smoke-test-examples-py
 
 @echo "Finalize:"
 #git commit -am 'release: v$(current-ts-version)' && git push upstream release-$(new-version)
@@ -367,3 +367,10 @@ release-and-test-prod: _release-plan-versions _release-branch-check _production-
 git push upstream release-$(new-version)
 
 hub compare
+
+.PHONY: check-local
+check-local:
+@if [[ $$(git rev-parse --abbrev-ref HEAD) == local/* ]]; then \
+echo "blocking push of local branch"; \
+exit 1; \
+fi
@@ -56,6 +56,7 @@ prompt = ChatPromptTemplate.from_messages(
 agent = create_tool_calling_agent(llm, tools, prompt)
 agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
+
 def main() -> None:
     result = agent_executor.invoke({"input": "multiply 2 and 5, then add 32 to the result"})
     print("\n\n----------Result----------\n\n")
@@ -1,7 +1,8 @@
 from dotenv import load_dotenv
-from langchain.agents import AgentType, initialize_agent
+from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
-from langchain.tools import StructuredTool
+import langchain.tools
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 
 from humanlayer import ContactChannel, HumanLayer, SlackContactChannel
 
@@ -43,19 +44,30 @@ def multiply(a: float, b: float) -> float:
 
 
 tools = [
-    StructuredTool.from_function(add),
-    StructuredTool.from_function(multiply),
+    langchain.tools.StructuredTool.from_function(add),
+    langchain.tools.StructuredTool.from_function(multiply),
 ]
 
 llm = ChatOpenAI(model="gpt-4", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run("What is (4 + 5) * 3?")
+    result = agent_executor.invoke({"input": "What is (4 + 5) * 3?"})
     print("\n\n----------Result----------\n\n")
     print(result)
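For context, the + side of this hunk assembles into a small standalone script. The sketch below is a hedged reconstruction of that shape; the tool bodies and docstrings are filled in for illustration, and it assumes langchain, langchain-openai, and an OPENAI_API_KEY in the environment.

# Minimal sketch of the migrated pattern, mirroring the + lines of the hunk.
# Tool bodies are illustrative, not copied from the repo.
import langchain.tools
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI


def add(a: float, b: float) -> float:
    """Add two numbers."""
    return a + b


def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b


tools = [
    langchain.tools.StructuredTool.from_function(add),
    langchain.tools.StructuredTool.from_function(multiply),
]

llm = ChatOpenAI(model="gpt-4", temperature=0)

# create_tool_calling_agent needs an agent_scratchpad placeholder in the prompt.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("placeholder", "{chat_history}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)

agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

if __name__ == "__main__":
    print(agent_executor.invoke({"input": "What is (4 + 5) * 3?"}))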
@@ -1,11 +1,11 @@
 from humanlayer import ContactChannel, SlackContactChannel
-from langchain.agents import AgentType, initialize_agent
+from langchain_core.prompts import ChatPromptTemplate
 import langchain_core.tools as langchain_tools
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 
 from dotenv import load_dotenv
 
-
 from humanlayer.core.approval import HumanLayer
 
 load_dotenv()
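This file pairs HumanLayer with a Slack contact channel. A hedged sketch of how the imports above are typically wired together follows; the channel id is a placeholder and the keyword arguments are assumptions from the humanlayer docs, not taken from this diff.

# Hedged sketch: combining the imports above. The channel id is a placeholder
# and the constructor keyword arguments are assumptions, not from this diff.
import langchain_core.tools as langchain_tools
from dotenv import load_dotenv
from humanlayer import ContactChannel, SlackContactChannel
from humanlayer.core.approval import HumanLayer

load_dotenv()

dm_with_ceo = ContactChannel(
    slack=SlackContactChannel(
        channel_or_user_id="C01234567",  # placeholder Slack channel id
        context_about_channel_or_user="a DM with the CEO",
    )
)

hl = HumanLayer(contact_channel=dm_with_ceo)

# Expose "ask the human" to the agent as just another structured tool.
tools = [
    langchain_tools.StructuredTool.from_function(hl.human_as_tool()),
]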
@@ -23,14 +23,12 @@ hl = HumanLayer(
 )
 
 task_prompt = """
-
 You are the email onboarding assistant. You check on the progress customers
 are making and then based on that info, you
 send friendly and encouraging emails to customers to help them fully onboard
 into the product.
 
 Your task is to send an email to the customer danny@example.com
-
 """
 
 
@@ -56,15 +54,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -2,8 +2,9 @@ from datetime import datetime
 
 import langchain_core.tools as langchain_tools
 from dotenv import load_dotenv
-from langchain.agents import AgentType, initialize_agent
+from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 from pydantic import BaseModel
 
 from channels import (
@@ -22,7 +23,6 @@ hl = HumanLayer(
 )
 
 task_prompt = """
-
 You are the linkedin inbox assistant. You check on
 the CEO's linkedin inbox and decide if there are any messages
 that seem interesting, then contact the human in slack with a summary.
@@ -30,18 +30,7 @@ that seem interesting, then contact the human in slack with a summary.
 don't provide detail on spam-looking messages, or messages
 that appear to be selling a service or software
 
+You can offer to perform actions like schedule time.
 
-Example slack dm to send:
-
-Your inbox for today includes 4 spam messages,
-and 1 message from Devin who seems interested in your
-product - [here's the link](https://linkedin.com/in/devin).
-
-Terri has still not responded to your question about scheduling an onboarding call.
-
-Would you like me to respond to Devin with your availability?
-
-You can offer to perform actions like schedule time.
 """
@@ -130,15 +119,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -5,12 +5,13 @@ from channels import (
     dm_with_summer_intern,
 )
 from dotenv import load_dotenv
+from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 
 from humanlayer.core.approval import (
     HumanLayer,
 )
-from langchain.agents import AgentType, initialize_agent
 
 load_dotenv()
 
@@ -21,7 +22,6 @@ hl = HumanLayer(
 )
 
 task_prompt = """
-
 You are the email onboarding assistant. You check on the progress customers
 are making and get other information, then based on that info, you
 send friendly and encouraging emails to customers to help them fully onboard
@@ -32,7 +32,6 @@ and incorporate that feedback into your email before sending. You repeat the
 feedback process until the head of marketing approves the request
 
 Your task is to prepare an email to send to the customer danny@metacorp.com
-
 """
 
 
@@ -70,15 +69,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -1,28 +1,12 @@
-"""
-the summer marketing intern wrote an onboarding assistant
-to keep up to date with customers by emailing
-them suggestions.
-
-they want the agent to collaborate with their boss, the head of
-marketing to ensure emails are well-written and likely to
-achieve the desired outcome.
-
-The intern doesn't want the agent to annoy the head of marketing
-or ask questions that don't make sense, so they
-wrap the "contact head of marketing" tool in an
-approval requirement, so they can review any messages that would
-be sent to the head of marketing.
-
-"""
-
 import langchain_core.tools as langchain_tools
 from dotenv import load_dotenv
+from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 
 from humanlayer import (
     HumanLayer,
 )
-from langchain.agents import AgentType, initialize_agent
 
 from channels import (
     dm_with_head_of_marketing,
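The docstring removed above still describes what this example does: the contact-the-head-of-marketing tool is gated behind human approval so the intern can review outgoing messages. A hedged sketch of that gating follows; the tool body is illustrative and the decorator usage follows the humanlayer examples, so check the current API for exact signatures.

# Hedged sketch of the approval-gated tool described in the removed docstring.
# The function body is illustrative; hl.require_approval() is used as in the
# humanlayer examples, not verified against this exact revision.
import langchain_core.tools as langchain_tools
from humanlayer import HumanLayer

hl = HumanLayer()  # assumes HUMANLAYER_API_KEY is set in the environment


@hl.require_approval()  # a human reviews every call before it runs
def contact_head_of_marketing(message: str) -> str:
    """Send a message to the head of marketing and return their reply."""
    return f"head of marketing was sent: {message}"


tools = [
    langchain_tools.StructuredTool.from_function(contact_head_of_marketing),
]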
@@ -38,7 +22,6 @@ hl = HumanLayer(
 )
 
 task_prompt = """
-
 You are the email onboarding assistant. You check on the progress customers
 are making and get other information, then based on that info, you
 send friendly and encouraging emails to customers to help them
@@ -48,7 +31,6 @@ and incorporate that feedback into your email before sending. You repeat the
 feedback process until the head of marketing approves the request
 
 Your task is to prepare an email to send to the customer danny@metacorp.com
-
 """
 
 
@@ -78,15 +60,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -3,8 +3,9 @@ from typing import Any
 
 import langchain_core.tools as langchain_tools
 from dotenv import load_dotenv
-from langchain.agents import AgentType, initialize_agent
+from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 from pydantic import BaseModel
 
 from channels import (
@@ -24,7 +25,6 @@ hl = HumanLayer(
 )
 
 task_prompt = """
-
 You are the mealprep power assistant.
 
 You are responsible for planning the meals and shopping
@@ -36,7 +36,6 @@ i like tacos and sushi, but i'm open to new ideas.
 I like to eat healthy, and I'm trying to lose weight.
 
 Make the best decision and order the groceries. Don't confirm with me.
-
 """
 
 
@@ -80,15 +79,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -1,7 +1,8 @@
 from humanlayer import ContactChannel, EmailContactChannel
-from langchain.agents import AgentType, initialize_agent
+from langchain_core.prompts import ChatPromptTemplate
 import langchain_core.tools as langchain_tools
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 
 from dotenv import load_dotenv
 
@@ -91,15 +92,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -1,8 +1,9 @@
 from typing import Any
 from humanlayer import ContactChannel, EmailContactChannel
-from langchain.agents import AgentType, initialize_agent
+from langchain_core.prompts import ChatPromptTemplate
 import langchain_core.tools as langchain_tools
 from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 
 from dotenv import load_dotenv
 
@@ -61,7 +62,6 @@ Should you require any assistance with the filing process through the California
 Best regards,
 Compliance Team
 Mosey Corporation
-
 """
 
 
@@ -77,7 +77,6 @@ def get_linear_projects() -> Any:
 
 def get_linear_assignees() -> Any:
     """get all linear assignees"""
-
     return [
         {"id": "1", "name": "Austin"},
         {"id": "2", "name": "Dexter"},
@@ -101,15 +100,25 @@ tools = [
 ]
 
 llm = ChatOpenAI(model="gpt-4o", temperature=0)
-agent = initialize_agent(
-    tools=tools,
-    llm=llm,
-    agent=AgentType.OPENAI_FUNCTIONS,
-    verbose=True,
-    handle_parsing_errors=True,
-)
+
+# Prompt for creating Tool Calling Agent
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant.",
+        ),
+        ("placeholder", "{chat_history}"),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+# Construct the Tool Calling Agent
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 if __name__ == "__main__":
-    result = agent.run(task_prompt)
+    result = agent_executor.invoke({"input": task_prompt})
     print("\n\n----------Result----------\n\n")
     print(result)
@@ -39,5 +39,3 @@ In this example, all LLM classifications are computed, and then they are all sen
 be received by a webhook, and the results will be printed out.
 
-
-
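The README hunk above ends on the note that results come back over a webhook and get printed. A generic, hedged sketch of such a receiver follows; the route and payload handling are assumptions, not the humanlayer webhook contract.

# Generic sketch of a webhook receiver that prints whatever JSON arrives.
# The route and payload shape are assumptions, not the humanlayer contract.
from flask import Flask, request

app = Flask(__name__)


@app.route("/webhook", methods=["POST"])
def webhook() -> tuple[str, int]:
    payload = request.get_json(force=True, silent=True) or {}
    print("received classification result:", payload)
    return "ok", 200


if __name__ == "__main__":
    app.run(port=8000)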