mirror of
https://github.com/humanlayer/12-factor-agents.git
synced 2025-08-20 18:59:53 +03:00
add sections
This commit is contained in:
142
workshops/2025-05-17/sections/00-hello-world/README.md
Normal file
142
workshops/2025-05-17/sections/00-hello-world/README.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# Chapter 0 - Hello World
|
||||
|
||||
Let's start with a basic TypeScript setup and a hello world program.
|
||||
|
||||
This guide is written in TypeScript (yes, a python version is coming soon)
|
||||
|
||||
There are many checkpoints between every file edit in the workshop steps,
|
||||
so even if you aren't super familiar with typescript,
|
||||
you should be able to keep up and run each example.
|
||||
|
||||
To run this guide, you'll need a relatively recent version of nodejs and npm installed
|
||||
|
||||
You can use whatever nodejs version manager you want, [homebrew](https://formulae.brew.sh/formula/node) is fine
|
||||
|
||||
|
||||
brew install node@20
|
||||
|
||||
You should see the node version
|
||||
|
||||
node --version
|
||||
|
||||
Copy initial package.json
|
||||
|
||||
cp ./walkthrough/00-package.json package.json
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```json
|
||||
// ./walkthrough/00-package.json
|
||||
{
|
||||
"name": "my-agent",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "tsx src/index.ts",
|
||||
"build": "tsc"
|
||||
},
|
||||
"dependencies": {
|
||||
"tsx": "^4.15.0",
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.0.0",
|
||||
"@typescript-eslint/parser": "^6.0.0",
|
||||
"eslint": "^8.0.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Install dependencies
|
||||
|
||||
npm install
|
||||
|
||||
Copy tsconfig.json
|
||||
|
||||
cp ./walkthrough/00-tsconfig.json tsconfig.json
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```json
|
||||
// ./walkthrough/00-tsconfig.json
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2017",
|
||||
"lib": ["esnext"],
|
||||
"allowJs": true,
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"noEmit": true,
|
||||
"esModuleInterop": true,
|
||||
"module": "esnext",
|
||||
"moduleResolution": "bundler",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"jsx": "preserve",
|
||||
"incremental": true,
|
||||
"plugins": [],
|
||||
"paths": {
|
||||
"@/*": ["./*"]
|
||||
}
|
||||
},
|
||||
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
|
||||
"exclude": ["node_modules", "walkthrough"]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
add .gitignore
|
||||
|
||||
cp ./walkthrough/00-.gitignore .gitignore
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```gitignore
|
||||
// ./walkthrough/00-.gitignore
|
||||
baml_client/
|
||||
node_modules/
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Create src folder
|
||||
|
||||
mkdir -p src
|
||||
|
||||
Add a simple hello world index.ts
|
||||
|
||||
cp ./walkthrough/00-index.ts src/index.ts
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```ts
|
||||
// ./walkthrough/00-index.ts
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
}
|
||||
|
||||
async function main() {
|
||||
await hello()
|
||||
}
|
||||
|
||||
main().catch(console.error)
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Run it to verify
|
||||
|
||||
npx tsx src/index.ts
|
||||
|
||||
You should see:
|
||||
|
||||
hello, world!
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
baml_client/
|
||||
node_modules/
|
||||
@@ -0,0 +1,9 @@
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
}
|
||||
|
||||
async function main() {
|
||||
await hello()
|
||||
}
|
||||
|
||||
main().catch(console.error)
|
||||
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"name": "my-agent",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "tsx src/index.ts",
|
||||
"build": "tsc"
|
||||
},
|
||||
"dependencies": {
|
||||
"tsx": "^4.15.0",
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.0.0",
|
||||
"@typescript-eslint/parser": "^6.0.0",
|
||||
"eslint": "^8.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2017",
|
||||
"lib": ["esnext"],
|
||||
"allowJs": true,
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"noEmit": true,
|
||||
"esModuleInterop": true,
|
||||
"module": "esnext",
|
||||
"moduleResolution": "bundler",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"jsx": "preserve",
|
||||
"incremental": true,
|
||||
"plugins": [],
|
||||
"paths": {
|
||||
"@/*": ["./*"]
|
||||
}
|
||||
},
|
||||
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
|
||||
"exclude": ["node_modules", "walkthrough"]
|
||||
}
|
||||
|
||||
2
workshops/2025-05-17/sections/01-cli-and-agent/.gitignore
vendored
Normal file
2
workshops/2025-05-17/sections/01-cli-and-agent/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
baml_client/
|
||||
node_modules/
|
||||
231
workshops/2025-05-17/sections/01-cli-and-agent/README.md
Normal file
231
workshops/2025-05-17/sections/01-cli-and-agent/README.md
Normal file
@@ -0,0 +1,231 @@
|
||||
# Chapter 1 - CLI and Agent Loop
|
||||
|
||||
Now let's add BAML and create our first agent with a CLI interface.
|
||||
|
||||
First, we'll need to install [BAML](https://github.com/boundaryml/baml)
|
||||
which is a tool for prompting and structured outputs.
|
||||
|
||||
|
||||
npm install @boundaryml/baml
|
||||
|
||||
Initialize BAML
|
||||
|
||||
npx baml-cli init
|
||||
|
||||
Remove default resume.baml
|
||||
|
||||
rm baml_src/resume.baml
|
||||
|
||||
Add our starter agent, a single baml prompt that we'll build on
|
||||
|
||||
cp ./walkthrough/01-agent.baml baml_src/agent.baml
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```rust
|
||||
// ./walkthrough/01-agent.baml
|
||||
class DoneForNow {
|
||||
intent "done_for_now"
|
||||
message string
|
||||
}
|
||||
|
||||
client<llm> Qwen3 {
|
||||
provider "openai-generic"
|
||||
options {
|
||||
base_url env.BASETEN_BASE_URL
|
||||
api_key env.BASETEN_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
) -> DoneForNow {
|
||||
client Qwen3
|
||||
|
||||
// use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
/nothink
|
||||
|
||||
You are a helpful assistant that can help with tasks.
|
||||
|
||||
{{ _.role("user") }}
|
||||
|
||||
You are working on the following thread:
|
||||
|
||||
{{ thread }}
|
||||
|
||||
What should the next step be?
|
||||
|
||||
{{ ctx.output_format }}
|
||||
"#
|
||||
}
|
||||
|
||||
test HelloWorld {
|
||||
functions [DetermineNextStep]
|
||||
args {
|
||||
thread #"
|
||||
{
|
||||
"type": "user_input",
|
||||
"data": "hello!"
|
||||
}
|
||||
"#
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Generate BAML client code
|
||||
|
||||
npx baml-cli generate
|
||||
|
||||
Enable BAML logging for this section
|
||||
|
||||
export BAML_LOG=debug
|
||||
|
||||
Add the CLI interface
|
||||
|
||||
cp ./walkthrough/01-cli.ts src/cli.ts
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```ts
|
||||
// ./walkthrough/01-cli.ts
|
||||
// cli.ts lets you invoke the agent loop from the command line
|
||||
|
||||
import { agentLoop, Thread, Event } from "./agent";
|
||||
|
||||
export async function cli() {
|
||||
// Get command line arguments, skipping the first two (node and script name)
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error("Error: Please provide a message as a command line argument");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Join all arguments into a single message
|
||||
const message = args.join(" ");
|
||||
|
||||
// Create a new thread with the user's message as the initial event
|
||||
const thread = new Thread([{ type: "user_input", data: message }]);
|
||||
|
||||
// Run the agent loop with the thread
|
||||
const result = await agentLoop(thread);
|
||||
console.log(result);
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Update index.ts to use the CLI
|
||||
|
||||
```diff
|
||||
src/index.ts
|
||||
+import { cli } from "./cli"
|
||||
+
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
|
||||
async function main() {
|
||||
- await hello()
|
||||
+ await cli()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>skip this step</summary>
|
||||
|
||||
cp ./walkthrough/01-index.ts src/index.ts
|
||||
|
||||
</details>
|
||||
|
||||
Add the agent implementation
|
||||
|
||||
cp ./walkthrough/01-agent.ts src/agent.ts
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```ts
|
||||
// ./walkthrough/01-agent.ts
|
||||
import { b } from "../baml_client";
|
||||
|
||||
// tool call or a respond to human tool
|
||||
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;
|
||||
|
||||
export interface Event {
|
||||
type: string
|
||||
data: any;
|
||||
}
|
||||
|
||||
export class Thread {
|
||||
events: Event[] = [];
|
||||
|
||||
constructor(events: Event[]) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
serializeForLLM() {
|
||||
// can change this to whatever custom serialization you want to do, XML, etc
|
||||
// e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
|
||||
return JSON.stringify(this.events);
|
||||
}
|
||||
}
|
||||
|
||||
// right now this just runs one turn with the LLM, but
|
||||
// we'll update this function to handle all the agent logic
|
||||
export async function agentLoop(thread: Thread): Promise<AgentResponse> {
|
||||
const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
return nextStep;
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
The BAML code is configured to use BASETEN_API_KEY by default
|
||||
|
||||
To get a Baseten API key and URL, create an account at [baseten.co](https://baseten.co),
|
||||
and then deploy [Qwen3 32B from the model library](https://www.baseten.co/library/qwen-3-32b/).
|
||||
|
||||
```rust
|
||||
function DetermineNextStep(thread: string) -> DoneForNow {
|
||||
client Qwen3
|
||||
// ...
|
||||
```
|
||||
|
||||
If you want to run the example with no changes, you can set the BASETEN_API_KEY env var to any valid baseten key.
|
||||
|
||||
If you want to try swapping out the model, you can change the `client` line.
|
||||
|
||||
[Docs on baml clients can be found here](https://docs.boundaryml.com/guide/baml-basics/switching-llms)
|
||||
|
||||
For example, you can configure [gemini](https://docs.boundaryml.com/ref/llm-client-providers/google-ai-gemini)
|
||||
or [anthropic](https://docs.boundaryml.com/ref/llm-client-providers/anthropic) as your model provider.
|
||||
|
||||
For example, to use openai with an OPENAI_API_KEY, you can do:
|
||||
|
||||
client "openai/gpt-4o"
|
||||
|
||||
|
||||
Set your env vars
|
||||
|
||||
export BASETEN_API_KEY=...
|
||||
export BASETEN_BASE_URL=...
|
||||
|
||||
Try it out
|
||||
|
||||
npx tsx src/index.ts hello
|
||||
|
||||
you should see a familiar response from the model
|
||||
|
||||
{
|
||||
intent: 'done_for_now',
|
||||
message: 'Hello! How can I assist you today?'
|
||||
}
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
}
|
||||
|
||||
async function main() {
|
||||
await hello()
|
||||
}
|
||||
|
||||
main().catch(console.error)
|
||||
@@ -0,0 +1,49 @@
|
||||
class DoneForNow {
|
||||
intent "done_for_now"
|
||||
message string
|
||||
}
|
||||
|
||||
client<llm> Qwen3 {
|
||||
provider "openai-generic"
|
||||
options {
|
||||
base_url env.BASETEN_BASE_URL
|
||||
api_key env.BASETEN_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
) -> DoneForNow {
|
||||
client Qwen3
|
||||
|
||||
// use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
/nothink
|
||||
|
||||
You are a helpful assistant that can help with tasks.
|
||||
|
||||
{{ _.role("user") }}
|
||||
|
||||
You are working on the following thread:
|
||||
|
||||
{{ thread }}
|
||||
|
||||
What should the next step be?
|
||||
|
||||
{{ ctx.output_format }}
|
||||
"#
|
||||
}
|
||||
|
||||
test HelloWorld {
|
||||
functions [DetermineNextStep]
|
||||
args {
|
||||
thread #"
|
||||
{
|
||||
"type": "user_input",
|
||||
"data": "hello!"
|
||||
}
|
||||
"#
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
import { b } from "../baml_client";
|
||||
|
||||
// tool call or a respond to human tool
|
||||
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;
|
||||
|
||||
export interface Event {
|
||||
type: string
|
||||
data: any;
|
||||
}
|
||||
|
||||
export class Thread {
|
||||
events: Event[] = [];
|
||||
|
||||
constructor(events: Event[]) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
serializeForLLM() {
|
||||
// can change this to whatever custom serialization you want to do, XML, etc
|
||||
// e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
|
||||
return JSON.stringify(this.events);
|
||||
}
|
||||
}
|
||||
|
||||
// right now this just runs one turn with the LLM, but
|
||||
// we'll update this function to handle all the agent logic
|
||||
export async function agentLoop(thread: Thread): Promise<AgentResponse> {
|
||||
const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
return nextStep;
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
// cli.ts lets you invoke the agent loop from the command line
|
||||
|
||||
import { agentLoop, Thread, Event } from "./agent";
|
||||
|
||||
export async function cli() {
|
||||
// Get command line arguments, skipping the first two (node and script name)
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error("Error: Please provide a message as a command line argument");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Join all arguments into a single message
|
||||
const message = args.join(" ");
|
||||
|
||||
// Create a new thread with the user's message as the initial event
|
||||
const thread = new Thread([{ type: "user_input", data: message }]);
|
||||
|
||||
// Run the agent loop with the thread
|
||||
const result = await agentLoop(thread);
|
||||
console.log(result);
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
import { cli } from "./cli"
|
||||
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
}
|
||||
|
||||
async function main() {
|
||||
await cli()
|
||||
}
|
||||
|
||||
main().catch(console.error)
|
||||
2
workshops/2025-05-17/sections/02-calculator-tools/.gitignore
vendored
Normal file
2
workshops/2025-05-17/sections/02-calculator-tools/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
baml_client/
|
||||
node_modules/
|
||||
84
workshops/2025-05-17/sections/02-calculator-tools/README.md
Normal file
84
workshops/2025-05-17/sections/02-calculator-tools/README.md
Normal file
@@ -0,0 +1,84 @@
|
||||
# Chapter 2 - Add Calculator Tools
|
||||
|
||||
Let's add some calculator tools to our agent.
|
||||
|
||||
Let's start by adding a tool definition for the calculator
|
||||
|
||||
These are simple structured outputs that we'll ask the model to
|
||||
return as a "next step" in the agentic loop.
|
||||
|
||||
|
||||
cp ./walkthrough/02-tool_calculator.baml baml_src/tool_calculator.baml
|
||||
|
||||
<details>
|
||||
<summary>show file</summary>
|
||||
|
||||
```rust
|
||||
// ./walkthrough/02-tool_calculator.baml
|
||||
type CalculatorTools = AddTool | SubtractTool | MultiplyTool | DivideTool
|
||||
|
||||
|
||||
class AddTool {
|
||||
intent "add"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class SubtractTool {
|
||||
intent "subtract"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class MultiplyTool {
|
||||
intent "multiply"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class DivideTool {
|
||||
intent "divide"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Now, let's update the agent's DetermineNextStep method to
|
||||
expose the calculator tools as potential next steps
|
||||
|
||||
|
||||
```diff
|
||||
baml_src/agent.baml
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
-) -> DoneForNow {
|
||||
+) -> CalculatorTools | DoneForNow {
|
||||
client Qwen3
|
||||
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>skip this step</summary>
|
||||
|
||||
cp ./walkthrough/02-agent.baml baml_src/agent.baml
|
||||
|
||||
</details>
|
||||
|
||||
Generate updated BAML client
|
||||
|
||||
npx baml-cli generate
|
||||
|
||||
Try out the calculator
|
||||
|
||||
npx tsx src/index.ts 'can you add 3 and 4'
|
||||
|
||||
You should see a tool call to the calculator
|
||||
|
||||
{
|
||||
intent: 'add',
|
||||
a: 3,
|
||||
b: 4
|
||||
}
|
||||
|
||||
@@ -0,0 +1,49 @@
|
||||
class DoneForNow {
|
||||
intent "done_for_now"
|
||||
message string
|
||||
}
|
||||
|
||||
client<llm> Qwen3 {
|
||||
provider "openai-generic"
|
||||
options {
|
||||
base_url env.BASETEN_BASE_URL
|
||||
api_key env.BASETEN_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
) -> DoneForNow {
|
||||
client Qwen3
|
||||
|
||||
// use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
/nothink
|
||||
|
||||
You are a helpful assistant that can help with tasks.
|
||||
|
||||
{{ _.role("user") }}
|
||||
|
||||
You are working on the following thread:
|
||||
|
||||
{{ thread }}
|
||||
|
||||
What should the next step be?
|
||||
|
||||
{{ ctx.output_format }}
|
||||
"#
|
||||
}
|
||||
|
||||
test HelloWorld {
|
||||
functions [DetermineNextStep]
|
||||
args {
|
||||
thread #"
|
||||
{
|
||||
"type": "user_input",
|
||||
"data": "hello!"
|
||||
}
|
||||
"#
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,75 @@
|
||||
// Learn more about clients at https://docs.boundaryml.com/docs/snippets/clients/overview
|
||||
|
||||
client<llm> CustomGPT4o {
|
||||
provider openai
|
||||
options {
|
||||
model "gpt-4o"
|
||||
api_key env.OPENAI_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
client<llm> CustomGPT4oMini {
|
||||
provider openai
|
||||
retry_policy Exponential
|
||||
options {
|
||||
model "gpt-4o-mini"
|
||||
api_key env.OPENAI_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
client<llm> CustomSonnet {
|
||||
provider anthropic
|
||||
options {
|
||||
model "claude-3-5-sonnet-20241022"
|
||||
api_key env.ANTHROPIC_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
client<llm> CustomHaiku {
|
||||
provider anthropic
|
||||
retry_policy Constant
|
||||
options {
|
||||
model "claude-3-haiku-20240307"
|
||||
api_key env.ANTHROPIC_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
// https://docs.boundaryml.com/docs/snippets/clients/round-robin
|
||||
client<llm> CustomFast {
|
||||
provider round-robin
|
||||
options {
|
||||
// This will alternate between the two clients
|
||||
strategy [CustomGPT4oMini, CustomHaiku]
|
||||
}
|
||||
}
|
||||
|
||||
// https://docs.boundaryml.com/docs/snippets/clients/fallback
|
||||
client<llm> OpenaiFallback {
|
||||
provider fallback
|
||||
options {
|
||||
// This will try the clients in order until one succeeds
|
||||
strategy [CustomGPT4oMini, CustomGPT4oMini]
|
||||
}
|
||||
}
|
||||
|
||||
// https://docs.boundaryml.com/docs/snippets/clients/retry
|
||||
retry_policy Constant {
|
||||
max_retries 3
|
||||
// Strategy is optional
|
||||
strategy {
|
||||
type constant_delay
|
||||
delay_ms 200
|
||||
}
|
||||
}
|
||||
|
||||
retry_policy Exponential {
|
||||
max_retries 2
|
||||
// Strategy is optional
|
||||
strategy {
|
||||
type exponential_backoff
|
||||
delay_ms 300
|
||||
multiplier 1.5
|
||||
max_delay_ms 10000
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
// This helps use auto generate libraries you can use in the language of
|
||||
// your choice. You can have multiple generators if you use multiple languages.
|
||||
// Just ensure that the output_dir is different for each generator.
|
||||
generator target {
|
||||
// Valid values: "python/pydantic", "typescript", "ruby/sorbet", "rest/openapi"
|
||||
output_type "typescript"
|
||||
|
||||
// Where the generated code will be saved (relative to baml_src/)
|
||||
output_dir "../"
|
||||
|
||||
// The version of the BAML package you have installed (e.g. same version as your baml-py or @boundaryml/baml).
|
||||
// The BAML VSCode extension version should also match this version.
|
||||
version "0.88.0"
|
||||
|
||||
// Valid values: "sync", "async"
|
||||
// This controls what `b.FunctionName()` will be (sync or async).
|
||||
default_client_mode async
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
import { b } from "../baml_client";
|
||||
|
||||
// tool call or a respond to human tool
|
||||
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;
|
||||
|
||||
export interface Event {
|
||||
type: string
|
||||
data: any;
|
||||
}
|
||||
|
||||
export class Thread {
|
||||
events: Event[] = [];
|
||||
|
||||
constructor(events: Event[]) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
serializeForLLM() {
|
||||
// can change this to whatever custom serialization you want to do, XML, etc
|
||||
// e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
|
||||
return JSON.stringify(this.events);
|
||||
}
|
||||
}
|
||||
|
||||
// right now this just runs one turn with the LLM, but
|
||||
// we'll update this function to handle all the agent logic
|
||||
export async function agentLoop(thread: Thread): Promise<AgentResponse> {
|
||||
const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
return nextStep;
|
||||
}
|
||||
|
||||
|
||||
23
workshops/2025-05-17/sections/02-calculator-tools/src/cli.ts
Normal file
23
workshops/2025-05-17/sections/02-calculator-tools/src/cli.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
// cli.ts lets you invoke the agent loop from the command line
|
||||
|
||||
import { agentLoop, Thread, Event } from "./agent";
|
||||
|
||||
export async function cli() {
|
||||
// Get command line arguments, skipping the first two (node and script name)
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error("Error: Please provide a message as a command line argument");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Join all arguments into a single message
|
||||
const message = args.join(" ");
|
||||
|
||||
// Create a new thread with the user's message as the initial event
|
||||
const thread = new Thread([{ type: "user_input", data: message }]);
|
||||
|
||||
// Run the agent loop with the thread
|
||||
const result = await agentLoop(thread);
|
||||
console.log(result);
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
import { cli } from "./cli"
|
||||
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
}
|
||||
|
||||
async function main() {
|
||||
await cli()
|
||||
}
|
||||
|
||||
main().catch(console.error)
|
||||
@@ -0,0 +1,49 @@
|
||||
class DoneForNow {
|
||||
intent "done_for_now"
|
||||
message string
|
||||
}
|
||||
|
||||
client<llm> Qwen3 {
|
||||
provider "openai-generic"
|
||||
options {
|
||||
base_url env.BASETEN_BASE_URL
|
||||
api_key env.BASETEN_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
) -> CalculatorTools | DoneForNow {
|
||||
client Qwen3
|
||||
|
||||
// use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
/nothink
|
||||
|
||||
You are a helpful assistant that can help with tasks.
|
||||
|
||||
{{ _.role("user") }}
|
||||
|
||||
You are working on the following thread:
|
||||
|
||||
{{ thread }}
|
||||
|
||||
What should the next step be?
|
||||
|
||||
{{ ctx.output_format }}
|
||||
"#
|
||||
}
|
||||
|
||||
test HelloWorld {
|
||||
functions [DetermineNextStep]
|
||||
args {
|
||||
thread #"
|
||||
{
|
||||
"type": "user_input",
|
||||
"data": "hello!"
|
||||
}
|
||||
"#
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
type CalculatorTools = AddTool | SubtractTool | MultiplyTool | DivideTool
|
||||
|
||||
|
||||
class AddTool {
|
||||
intent "add"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class SubtractTool {
|
||||
intent "subtract"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class MultiplyTool {
|
||||
intent "multiply"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class DivideTool {
|
||||
intent "divide"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
2
workshops/2025-05-17/sections/03-tool-loop/.gitignore
vendored
Normal file
2
workshops/2025-05-17/sections/03-tool-loop/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
baml_client/
|
||||
node_modules/
|
||||
194
workshops/2025-05-17/sections/03-tool-loop/README.md
Normal file
194
workshops/2025-05-17/sections/03-tool-loop/README.md
Normal file
@@ -0,0 +1,194 @@
|
||||
# Chapter 3 - Process Tool Calls in a Loop
|
||||
|
||||
Now let's add a real agentic loop that can run the tools and get a final answer from the LLM.
|
||||
|
||||
First, lets update the agent to handle the tool call
|
||||
|
||||
|
||||
```diff
|
||||
src/agent.ts
|
||||
}
|
||||
|
||||
-// right now this just runs one turn with the LLM, but
|
||||
-// we'll update this function to handle all the agent logic
|
||||
-export async function agentLoop(thread: Thread): Promise<AgentResponse> {
|
||||
- const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
- return nextStep;
|
||||
+
|
||||
+
|
||||
+export async function agentLoop(thread: Thread): Promise<string> {
|
||||
+
|
||||
+ while (true) {
|
||||
+ const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
+ console.log("nextStep", nextStep);
|
||||
+
|
||||
+ switch (nextStep.intent) {
|
||||
+ case "done_for_now":
|
||||
+ // response to human, return the next step object
|
||||
+ return nextStep.message;
|
||||
+ case "add":
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_call",
|
||||
+ "data": nextStep
|
||||
+ });
|
||||
+ const result = nextStep.a + nextStep.b;
|
||||
+ console.log("tool_response", result);
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_response",
|
||||
+ "data": result
|
||||
+ });
|
||||
+ continue;
|
||||
+ default:
|
||||
+ throw new Error(`Unknown intent: ${nextStep.intent}`);
|
||||
+ }
|
||||
+ }
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>skip this step</summary>
|
||||
|
||||
cp ./walkthrough/03-agent.ts src/agent.ts
|
||||
|
||||
</details>
|
||||
|
||||
Now, lets try it out
|
||||
|
||||
|
||||
npx tsx src/index.ts 'can you add 3 and 4'
|
||||
|
||||
you should see the agent call the tool and then return the result
|
||||
|
||||
{
|
||||
intent: 'done_for_now',
|
||||
message: 'The sum of 3 and 4 is 7.'
|
||||
}
|
||||
|
||||
For the next step, we'll do a more complex calculation, let's turn off the baml logs for more concise output
|
||||
|
||||
export BAML_LOG=off
|
||||
|
||||
Try a multi-step calculation
|
||||
|
||||
npx tsx src/index.ts 'can you add 3 and 4, then add 6 to that result'
|
||||
|
||||
you'll notice that tools like multiply and divide are not available
|
||||
|
||||
npx tsx src/index.ts 'can you multiply 3 and 4'
|
||||
|
||||
next, let's add handlers for the rest of the calculator tools
|
||||
|
||||
|
||||
```diff
|
||||
src/agent.ts
|
||||
-import { b } from "../baml_client";
|
||||
+import { AddTool, SubtractTool, DivideTool, MultiplyTool, b } from "../baml_client";
|
||||
|
||||
-// tool call or a respond to human tool
|
||||
-type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;
|
||||
-
|
||||
export interface Event {
|
||||
type: string
|
||||
}
|
||||
|
||||
+export type CalculatorTool = AddTool | SubtractTool | MultiplyTool | DivideTool;
|
||||
|
||||
+export async function handleNextStep(nextStep: CalculatorTool, thread: Thread): Promise<Thread> {
|
||||
+ let result: number;
|
||||
+ switch (nextStep.intent) {
|
||||
+ case "add":
|
||||
+ result = nextStep.a + nextStep.b;
|
||||
+ console.log("tool_response", result);
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_response",
|
||||
+ "data": result
|
||||
+ });
|
||||
+ return thread;
|
||||
+ case "subtract":
|
||||
+ result = nextStep.a - nextStep.b;
|
||||
+ console.log("tool_response", result);
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_response",
|
||||
+ "data": result
|
||||
+ });
|
||||
+ return thread;
|
||||
+ case "multiply":
|
||||
+ result = nextStep.a * nextStep.b;
|
||||
+ console.log("tool_response", result);
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_response",
|
||||
+ "data": result
|
||||
+ });
|
||||
+ return thread;
|
||||
+ case "divide":
|
||||
+ result = nextStep.a / nextStep.b;
|
||||
+ console.log("tool_response", result);
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_response",
|
||||
+ "data": result
|
||||
+ });
|
||||
+ return thread;
|
||||
+ }
|
||||
+}
|
||||
|
||||
export async function agentLoop(thread: Thread): Promise<string> {
|
||||
console.log("nextStep", nextStep);
|
||||
|
||||
+ thread.events.push({
|
||||
+ "type": "tool_call",
|
||||
+ "data": nextStep
|
||||
+ });
|
||||
+
|
||||
switch (nextStep.intent) {
|
||||
case "done_for_now":
|
||||
return nextStep.message;
|
||||
case "add":
|
||||
- thread.events.push({
|
||||
- "type": "tool_call",
|
||||
- "data": nextStep
|
||||
- });
|
||||
- const result = nextStep.a + nextStep.b;
|
||||
- console.log("tool_response", result);
|
||||
- thread.events.push({
|
||||
- "type": "tool_response",
|
||||
- "data": result
|
||||
- });
|
||||
- continue;
|
||||
- default:
|
||||
- throw new Error(`Unknown intent: ${nextStep.intent}`);
|
||||
+ case "subtract":
|
||||
+ case "multiply":
|
||||
+ case "divide":
|
||||
+ thread = await handleNextStep(nextStep, thread);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>skip this step</summary>
|
||||
|
||||
cp ./walkthrough/03b-agent.ts src/agent.ts
|
||||
|
||||
</details>
|
||||
|
||||
Test subtraction
|
||||
|
||||
npx tsx src/index.ts 'can you subtract 3 from 4'
|
||||
|
||||
now, let's test the multiplication tool
|
||||
|
||||
|
||||
npx tsx src/index.ts 'can you multiply 3 and 4'
|
||||
|
||||
finally, let's test a more complex calculation with multiple operations
|
||||
|
||||
|
||||
npx tsx src/index.ts 'can you multiply 3 and 4, then divide the result by 2 and then add 12 to that result'
|
||||
|
||||
congratulations, you've taken your first step into hand-rolling an agent loop.
|
||||
|
||||
from here, we're going to start incorporating some more intermediate and advanced
|
||||
concepts for 12-factor agents.
|
||||
|
||||
|
||||
@@ -0,0 +1,49 @@
|
||||
class DoneForNow {
|
||||
intent "done_for_now"
|
||||
message string
|
||||
}
|
||||
|
||||
client<llm> Qwen3 {
|
||||
provider "openai-generic"
|
||||
options {
|
||||
base_url env.BASETEN_BASE_URL
|
||||
api_key env.BASETEN_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
) -> CalculatorTools | DoneForNow {
|
||||
client Qwen3
|
||||
|
||||
// use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
/nothink
|
||||
|
||||
You are a helpful assistant that can help with tasks.
|
||||
|
||||
{{ _.role("user") }}
|
||||
|
||||
You are working on the following thread:
|
||||
|
||||
{{ thread }}
|
||||
|
||||
What should the next step be?
|
||||
|
||||
{{ ctx.output_format }}
|
||||
"#
|
||||
}
|
||||
|
||||
test HelloWorld {
|
||||
functions [DetermineNextStep]
|
||||
args {
|
||||
thread #"
|
||||
{
|
||||
"type": "user_input",
|
||||
"data": "hello!"
|
||||
}
|
||||
"#
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,75 @@
|
||||
// Learn more about clients at https://docs.boundaryml.com/docs/snippets/clients/overview
|
||||
|
||||
client<llm> CustomGPT4o {
|
||||
provider openai
|
||||
options {
|
||||
model "gpt-4o"
|
||||
api_key env.OPENAI_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
client<llm> CustomGPT4oMini {
|
||||
provider openai
|
||||
retry_policy Exponential
|
||||
options {
|
||||
model "gpt-4o-mini"
|
||||
api_key env.OPENAI_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
client<llm> CustomSonnet {
|
||||
provider anthropic
|
||||
options {
|
||||
model "claude-3-5-sonnet-20241022"
|
||||
api_key env.ANTHROPIC_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
client<llm> CustomHaiku {
|
||||
provider anthropic
|
||||
retry_policy Constant
|
||||
options {
|
||||
model "claude-3-haiku-20240307"
|
||||
api_key env.ANTHROPIC_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
// https://docs.boundaryml.com/docs/snippets/clients/round-robin
|
||||
client<llm> CustomFast {
|
||||
provider round-robin
|
||||
options {
|
||||
// This will alternate between the two clients
|
||||
strategy [CustomGPT4oMini, CustomHaiku]
|
||||
}
|
||||
}
|
||||
|
||||
// https://docs.boundaryml.com/docs/snippets/clients/fallback
|
||||
client<llm> OpenaiFallback {
|
||||
provider fallback
|
||||
options {
|
||||
// This will try the clients in order until one succeeds
|
||||
strategy [CustomGPT4oMini, CustomGPT4oMini]
|
||||
}
|
||||
}
|
||||
|
||||
// https://docs.boundaryml.com/docs/snippets/clients/retry
|
||||
retry_policy Constant {
|
||||
max_retries 3
|
||||
// Strategy is optional
|
||||
strategy {
|
||||
type constant_delay
|
||||
delay_ms 200
|
||||
}
|
||||
}
|
||||
|
||||
retry_policy Exponential {
|
||||
max_retries 2
|
||||
// Strategy is optional
|
||||
strategy {
|
||||
type exponential_backoff
|
||||
delay_ms 300
|
||||
multiplier 1.5
|
||||
max_delay_ms 10000
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
// This helps use auto generate libraries you can use in the language of
|
||||
// your choice. You can have multiple generators if you use multiple languages.
|
||||
// Just ensure that the output_dir is different for each generator.
|
||||
generator target {
|
||||
// Valid values: "python/pydantic", "typescript", "ruby/sorbet", "rest/openapi"
|
||||
output_type "typescript"
|
||||
|
||||
// Where the generated code will be saved (relative to baml_src/)
|
||||
output_dir "../"
|
||||
|
||||
// The version of the BAML package you have installed (e.g. same version as your baml-py or @boundaryml/baml).
|
||||
// The BAML VSCode extension version should also match this version.
|
||||
version "0.88.0"
|
||||
|
||||
// Valid values: "sync", "async"
|
||||
// This controls what `b.FunctionName()` will be (sync or async).
|
||||
default_client_mode async
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
type CalculatorTools = AddTool | SubtractTool | MultiplyTool | DivideTool
|
||||
|
||||
|
||||
class AddTool {
|
||||
intent "add"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class SubtractTool {
|
||||
intent "subtract"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class MultiplyTool {
|
||||
intent "multiply"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
class DivideTool {
|
||||
intent "divide"
|
||||
a int | float
|
||||
b int | float
|
||||
}
|
||||
|
||||
32
workshops/2025-05-17/sections/03-tool-loop/src/agent.ts
Normal file
32
workshops/2025-05-17/sections/03-tool-loop/src/agent.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
import { b } from "../baml_client";
|
||||
|
||||
// tool call or a respond to human tool
|
||||
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;
|
||||
|
||||
// A single entry in the agent's conversation history
// (e.g. "user_input", "tool_call", "tool_response").
export interface Event {
    // discriminator for the kind of event
    type: string
    // event payload; shape depends on `type`
    data: any;
}
|
||||
|
||||
export class Thread {
|
||||
events: Event[] = [];
|
||||
|
||||
constructor(events: Event[]) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
serializeForLLM() {
|
||||
// can change this to whatever custom serialization you want to do, XML, etc
|
||||
// e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
|
||||
return JSON.stringify(this.events);
|
||||
}
|
||||
}
|
||||
|
||||
// right now this just runs one turn with the LLM, but
|
||||
// we'll update this function to handle all the agent logic
|
||||
export async function agentLoop(thread: Thread): Promise<AgentResponse> {
|
||||
const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
return nextStep;
|
||||
}
|
||||
|
||||
|
||||
23
workshops/2025-05-17/sections/03-tool-loop/src/cli.ts
Normal file
23
workshops/2025-05-17/sections/03-tool-loop/src/cli.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
// cli.ts lets you invoke the agent loop from the command line
|
||||
|
||||
import { agentLoop, Thread, Event } from "./agent";
|
||||
|
||||
export async function cli() {
|
||||
// Get command line arguments, skipping the first two (node and script name)
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0) {
|
||||
console.error("Error: Please provide a message as a command line argument");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Join all arguments into a single message
|
||||
const message = args.join(" ");
|
||||
|
||||
// Create a new thread with the user's message as the initial event
|
||||
const thread = new Thread([{ type: "user_input", data: message }]);
|
||||
|
||||
// Run the agent loop with the thread
|
||||
const result = await agentLoop(thread);
|
||||
console.log(result);
|
||||
}
|
||||
11
workshops/2025-05-17/sections/03-tool-loop/src/index.ts
Normal file
11
workshops/2025-05-17/sections/03-tool-loop/src/index.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import { cli } from "./cli"
|
||||
|
||||
async function hello(): Promise<void> {
|
||||
console.log('hello, world!')
|
||||
}
|
||||
|
||||
async function main() {
|
||||
await cli()
|
||||
}
|
||||
|
||||
main().catch(console.error)
|
||||
@@ -0,0 +1,55 @@
|
||||
import { b } from "../baml_client";
|
||||
|
||||
// tool call or a respond to human tool
|
||||
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;
|
||||
|
||||
// A single entry in the agent's conversation history
// (e.g. "user_input", "tool_call", "tool_response").
export interface Event {
    // discriminator for the kind of event
    type: string
    // event payload; shape depends on `type`
    data: any;
}
|
||||
|
||||
export class Thread {
|
||||
events: Event[] = [];
|
||||
|
||||
constructor(events: Event[]) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
serializeForLLM() {
|
||||
// can change this to whatever custom serialization you want to do, XML, etc
|
||||
// e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
|
||||
return JSON.stringify(this.events);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Core agent loop: repeatedly ask the LLM for the next step and execute
// tools until the model signals it is done.
//
// Returns the model's final message for the human.
export async function agentLoop(thread: Thread): Promise<string> {

    while (true) {
        const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
        console.log("nextStep", nextStep);

        switch (nextStep.intent) {
            case "done_for_now":
                // response to human: return the final message string
                return nextStep.message;
            case "add":
                // record the model's tool call on the thread
                thread.events.push({
                    "type": "tool_call",
                    "data": nextStep
                });
                const result = nextStep.a + nextStep.b;
                console.log("tool_response", result);
                // record the tool result so the next LLM turn can see it
                thread.events.push({
                    "type": "tool_response",
                    "data": result
                });
                continue;
            default:
                // only "add" is wired up at this point in the walkthrough
                throw new Error(`Unknown intent: ${nextStep.intent}`);
        }
    }
}
|
||||
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
import { AddTool, SubtractTool, DivideTool, MultiplyTool, b } from "../baml_client";
|
||||
|
||||
// A single entry in the agent's conversation history
// (e.g. "user_input", "tool_call", "tool_response").
export interface Event {
    // discriminator for the kind of event
    type: string
    // event payload; shape depends on `type`
    data: any;
}
|
||||
|
||||
export class Thread {
|
||||
events: Event[] = [];
|
||||
|
||||
constructor(events: Event[]) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
serializeForLLM() {
|
||||
// can change this to whatever custom serialization you want to do, XML, etc
|
||||
// e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
|
||||
return JSON.stringify(this.events);
|
||||
}
|
||||
}
|
||||
|
||||
// Union of the calculator tool calls the LLM can emit, discriminated by `intent`.
export type CalculatorTool = AddTool | SubtractTool | MultiplyTool | DivideTool;
|
||||
|
||||
export async function handleNextStep(nextStep: CalculatorTool, thread: Thread): Promise<Thread> {
|
||||
let result: number;
|
||||
switch (nextStep.intent) {
|
||||
case "add":
|
||||
result = nextStep.a + nextStep.b;
|
||||
console.log("tool_response", result);
|
||||
thread.events.push({
|
||||
"type": "tool_response",
|
||||
"data": result
|
||||
});
|
||||
return thread;
|
||||
case "subtract":
|
||||
result = nextStep.a - nextStep.b;
|
||||
console.log("tool_response", result);
|
||||
thread.events.push({
|
||||
"type": "tool_response",
|
||||
"data": result
|
||||
});
|
||||
return thread;
|
||||
case "multiply":
|
||||
result = nextStep.a * nextStep.b;
|
||||
console.log("tool_response", result);
|
||||
thread.events.push({
|
||||
"type": "tool_response",
|
||||
"data": result
|
||||
});
|
||||
return thread;
|
||||
case "divide":
|
||||
result = nextStep.a / nextStep.b;
|
||||
console.log("tool_response", result);
|
||||
thread.events.push({
|
||||
"type": "tool_response",
|
||||
"data": result
|
||||
});
|
||||
return thread;
|
||||
}
|
||||
}
|
||||
|
||||
export async function agentLoop(thread: Thread): Promise<string> {
|
||||
|
||||
while (true) {
|
||||
const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
|
||||
console.log("nextStep", nextStep);
|
||||
|
||||
thread.events.push({
|
||||
"type": "tool_call",
|
||||
"data": nextStep
|
||||
});
|
||||
|
||||
switch (nextStep.intent) {
|
||||
case "done_for_now":
|
||||
// response to human, return the next step object
|
||||
return nextStep.message;
|
||||
case "add":
|
||||
case "subtract":
|
||||
case "multiply":
|
||||
case "divide":
|
||||
thread = await handleNextStep(nextStep, thread);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -434,30 +434,11 @@ expose the calculator tools as potential next steps
|
||||
|
||||
```diff
|
||||
baml_src/agent.baml
|
||||
}
|
||||
|
||||
-client<llm> Qwen3 {
|
||||
- provider "openai-generic"
|
||||
- options {
|
||||
- base_url env.BASETEN_BASE_URL
|
||||
- api_key env.BASETEN_API_KEY
|
||||
- }
|
||||
-}
|
||||
-
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
-) -> DoneForNow {
|
||||
- client Qwen3
|
||||
+) -> CalculatorTools | DoneForNow {
|
||||
+ client "openai/gpt-4o"
|
||||
|
||||
- // use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
- /nothink
|
||||
-
|
||||
You are a helpful assistant that can help with tasks.
|
||||
client Qwen3
|
||||
|
||||
```
|
||||
|
||||
|
||||
@@ -9,12 +9,12 @@ targets:
|
||||
newFiles:
|
||||
cat: false
|
||||
cp: true
|
||||
# - folders:
|
||||
# path: "./build/sections"
|
||||
# skip:
|
||||
# - "cleanup"
|
||||
# final:
|
||||
# dirName: "final"
|
||||
- folders:
|
||||
path: "./build/sections"
|
||||
skip:
|
||||
- "cleanup"
|
||||
final:
|
||||
dirName: "final"
|
||||
|
||||
sections:
|
||||
- name: cleanup
|
||||
|
||||
@@ -3,14 +3,25 @@ class DoneForNow {
|
||||
message string
|
||||
}
|
||||
|
||||
client<llm> Qwen3 {
|
||||
provider "openai-generic"
|
||||
options {
|
||||
base_url env.BASETEN_BASE_URL
|
||||
api_key env.BASETEN_API_KEY
|
||||
}
|
||||
}
|
||||
|
||||
function DetermineNextStep(
|
||||
thread: string
|
||||
) -> CalculatorTools | DoneForNow {
|
||||
client "openai/gpt-4o"
|
||||
client Qwen3
|
||||
|
||||
// use /nothink for now because the thinking tokens (or streaming thereof) screw with baml (i think (no pun intended))
|
||||
prompt #"
|
||||
{{ _.role("system") }}
|
||||
|
||||
/nothink
|
||||
|
||||
You are a helpful assistant that can help with tasks.
|
||||
|
||||
{{ _.role("user") }}
|
||||
|
||||
Reference in New Issue
Block a user