mirror of https://github.com/humanlayer/12-factor-agents.git
synced 2025-08-20 18:59:53 +03:00

commit: wip testing

132  workshops/2025-07-16/hack/BAML_LOGGING_IN_JUPYTER.md  (new file)
@@ -0,0 +1,132 @@
# BAML Logging in Jupyter Notebooks

## Overview

BAML uses the `BAML_LOG` environment variable to control logging output. However, in Jupyter notebooks, these logs are sent to stderr and aren't automatically captured in the cell output. This guide explains how to capture and display BAML logs in Jupyter notebooks.

## The Problem

When you set `os.environ["BAML_LOG"] = "info"` and run BAML functions in a Jupyter notebook, the logs are written to stderr but don't appear in the notebook cell output. This is because:

1. BAML logs to stderr at the system level
2. Jupyter notebooks don't automatically capture subprocess stderr
3. The logs bypass Python's standard logging module
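To make the distinction concrete, here is a minimal sketch (not BAML-specific; exact behavior depends on your ipykernel version and configuration) showing why stream-level output can bypass the cell:

```python
import os
import sys

# Python-level stderr is replaced by the Jupyter kernel, so this shows up in the cell output
sys.stderr.write("written via sys.stderr: visible in the notebook\n")

# Writing to file descriptor 2 directly (as native extensions often do) typically bypasses
# that capture and lands in the terminal or server log instead of the cell
os.write(2, b"written via fd 2: usually not visible in the cell\n")
```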
## The Solution

The solution is to use IPython's `capture_output` context manager to capture both stdout and stderr when running BAML functions.

### Basic Usage

```python
from IPython.utils.capture import capture_output
import os

# Set BAML logging level
os.environ['BAML_LOG'] = 'info'

# Helper function to run code with BAML log capture
def run_with_baml_logs(func, *args, **kwargs):
    """Run a function and display BAML logs in the notebook."""
    with capture_output() as captured:
        result = func(*args, **kwargs)

    # Display the result
    if result is not None:
        print("=== Result ===")
        print(result)

    # Display BAML logs from stderr
    if captured.stderr:
        print("\n=== BAML Logs ===")
        print(captured.stderr)

    return result

# Use it like this:
run_with_baml_logs(main, "can you multiply 3 and 4")
```
## BAML Log Levels

Set `BAML_LOG` to one of these levels:

- `error`: Fatal errors only
- `warn`: Function failures (default)
- `info`: All function calls, prompts, and responses
- `debug`: Includes detailed parsing errors
- `trace`: Most comprehensive logging
- `off`: No logging
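Switching levels is just a matter of setting the environment variable before running your BAML calls; a minimal sketch using the levels above:

```python
import os

os.environ['BAML_LOG'] = 'info'    # prompts, responses, and parsed output
# os.environ['BAML_LOG'] = 'debug' # add detailed parsing errors when outputs look wrong
# os.environ['BAML_LOG'] = 'off'   # silence BAML entirely
```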
## Enhanced Reasoning Visualization

For sections that use reasoning prompts, you can extract and highlight the reasoning steps:

```python
import re
from IPython.display import display, HTML

def run_and_show_reasoning(func, *args, **kwargs):
    """Run a function and highlight reasoning steps."""
    with capture_output() as captured:
        result = func(*args, **kwargs)

    if captured.stderr:
        # Extract reasoning blocks
        reasoning_pattern = r'<reasoning>(.*?)</reasoning>'
        reasoning_matches = re.findall(reasoning_pattern, captured.stderr, re.DOTALL)

        if reasoning_matches:
            display(HTML("<h3>🧠 Model Reasoning:</h3>"))
            for reasoning in reasoning_matches:
                display(HTML(f'''
                    <div style='background-color: #f0f8ff;
                                border-left: 4px solid #4169e1;
                                padding: 10px; margin: 10px 0;'>
                        {reasoning.strip().replace(chr(10), '<br>')}
                    </div>
                '''))

    return result
```
## Implementation in Notebook Generator

The updated `walkthroughgen_py.py` automatically includes:

1. A logging helper cell after BAML setup
2. Automatic wrapping of `main()` calls with `run_with_baml_logs()`
3. Enhanced reasoning visualization for the reasoning chapter
4. Proper handling of different log levels with icons
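For reference, the generated cells simply call these wrappers around `main()`; a sketch of what they look like (the message is only an example):

```python
# Most chapters wrap the call with the basic logging helper
run_with_baml_logs(main, "can you multiply 3 and 4")

# The reasoning chapter uses the enhanced visualization instead
run_and_show_reasoning(main, "can you multiply 3 and 4")
```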
## What You'll See

With logging enabled, you'll see:

- **Prompt sent to the model**: The full prompt including system and user messages
- **Raw model response**: The complete response from the LLM
- **Parsed output**: How BAML parsed the response into structured data
- **Reasoning steps**: If using reasoning prompts, the model's thought process
- **Timing information**: How long each call took
- **Token usage**: Number of tokens used (if available)
## Troubleshooting

If logs aren't appearing:

1. Verify `BAML_LOG` is set: `print(os.environ.get('BAML_LOG'))`
2. Ensure you're using the capture wrapper functions
3. Check that BAML is properly initialized
4. Try setting `BAML_LOG='debug'` for more verbose output
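A minimal diagnostic cell covering the first and last checks (illustrative only):

```python
import os

# 1. Confirm the variable is actually set in this kernel
print("BAML_LOG =", os.environ.get('BAML_LOG'))

# 4. Temporarily raise verbosity, then re-run the wrapped call
os.environ['BAML_LOG'] = 'debug'
# run_with_baml_logs(main, "can you multiply 3 and 4")
```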
## Environment Variables

- `BAML_LOG`: Controls logging level (info, debug, trace, etc.)
- `BOUNDARY_MAX_LOG_CHUNK_CHARS`: Truncate log entries (e.g., 3000)
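For example, to keep `info`-level output readable you can cap the size of each log entry (the values here are only illustrative):

```python
import os

os.environ['BAML_LOG'] = 'info'
os.environ['BOUNDARY_MAX_LOG_CHUNK_CHARS'] = '3000'  # truncate each log chunk to ~3000 characters
```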
## Notes

- Logs are captured per cell execution
- Full logs can be quite verbose, so start with the `info` level
- The reasoning visualization works best with prompts that include `<reasoning>` tags
- In Google Colab, the capture functions work the same way as in local Jupyter

82  workshops/2025-07-16/hack/baml_logging_notebook.py  (new file)
@@ -0,0 +1,82 @@
#!/usr/bin/env python3
"""Helper utilities for capturing BAML logs in Jupyter notebooks."""

import os
import sys
import logging
import contextlib
from io import StringIO

# Configure Python logging to display in Jupyter
def setup_jupyter_logging():
    """Configure logging to work properly in Jupyter notebooks."""
    # Remove any existing handlers
    root_logger = logging.getLogger()
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)

    # Create a new handler that outputs to stdout
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

    # Set up the root logger
    root_logger.addHandler(handler)
    root_logger.setLevel(logging.INFO)

    # Also set up BAML-specific logger
    baml_logger = logging.getLogger('baml')
    baml_logger.setLevel(logging.DEBUG)

    return root_logger


@contextlib.contextmanager
def capture_baml_output():
    """Context manager to capture BAML output in Jupyter notebooks."""
    # Capture stdout and stderr
    old_stdout = sys.stdout
    old_stderr = sys.stderr

    stdout_capture = StringIO()
    stderr_capture = StringIO()

    try:
        # Redirect stdout and stderr
        sys.stdout = stdout_capture
        sys.stderr = stderr_capture

        yield stdout_capture, stderr_capture

    finally:
        # Restore original stdout/stderr
        sys.stdout = old_stdout
        sys.stderr = old_stderr

        # Print captured output
        stdout_content = stdout_capture.getvalue()
        stderr_content = stderr_capture.getvalue()

        if stdout_content:
            print("=== BAML Output ===")
            print(stdout_content)

        if stderr_content:
            print("=== BAML Logs ===")
            print(stderr_content)


def run_with_baml_logging(func, *args, **kwargs):
    """Run a function and capture its BAML output."""
    # Ensure BAML_LOG is set
    if 'BAML_LOG' not in os.environ:
        os.environ['BAML_LOG'] = 'info'

    print(f"BAML_LOG is set to: {os.environ.get('BAML_LOG')}")

    with capture_baml_output() as (stdout_cap, stderr_cap):
        result = func(*args, **kwargs)

    return result


# Example usage in notebook:
# from baml_logging_notebook import run_with_baml_logging, setup_jupyter_logging
# setup_jupyter_logging()
# result = run_with_baml_logging(main, "can you multiply 3 and 4")

1210  workshops/2025-07-16/hack/chapter0-2_with_logging.ipynb  (new file)
(File diff suppressed because it is too large.)

112  workshops/2025-07-16/hack/notebook_baml_logging_snippet.py  (new file)
@@ -0,0 +1,112 @@
"""
Snippet to add to notebooks for capturing BAML logs.

Add this code cell after the BAML setup cells in the notebook:
"""

notebook_logging_cell = '''# Enable BAML logging capture in Jupyter
import os
import sys
from IPython.utils.capture import capture_output

# Set BAML logging level
os.environ['BAML_LOG'] = 'info'

# Helper function to run code with BAML log capture
def run_with_baml_logs(func, *args, **kwargs):
    """Run a function and display BAML logs in the notebook."""
    print(f"Running with BAML_LOG={os.environ.get('BAML_LOG')}...")

    # Capture all output
    with capture_output() as captured:
        result = func(*args, **kwargs)

    # Display the result first
    if result is not None:
        print("=== Result ===")
        print(result)

    # Display captured stdout
    if captured.stdout:
        print("\\n=== Output ===")
        print(captured.stdout)

    # Display BAML logs from stderr
    if captured.stderr:
        print("\\n=== BAML Logs ===")
        # Format the logs for better readability
        log_lines = captured.stderr.strip().split('\\n')
        for line in log_lines:
            if 'reasoning' in line.lower() or '<reasoning>' in line:
                print(f"🤔 {line}")
            elif 'error' in line.lower():
                print(f"❌ {line}")
            elif 'warn' in line.lower():
                print(f"⚠️ {line}")
            else:
                print(f"  {line}")

    return result

# Alternative: Monkey-patch the main function to always capture logs
def with_baml_logging(original_func):
    """Decorator to add BAML logging to any function."""
    def wrapper(*args, **kwargs):
        return run_with_baml_logs(original_func, *args, **kwargs)
    return wrapper
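# Example (hypothetical usage): wrap main once so every call captures logs
# main = with_baml_logging(main)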
print("BAML logging helper functions loaded! Use run_with_baml_logs(main, 'your message') to see logs.")
'''

# For section 6 (reasoning), add this special cell
reasoning_logging_cell = '''# Special logging setup for reasoning visualization
import os
import re
from IPython.utils.capture import capture_output
from IPython.display import display, HTML

os.environ['BAML_LOG'] = 'info'

def run_and_show_reasoning(func, *args, **kwargs):
    """Run a function and highlight the reasoning steps from BAML logs."""
    with capture_output() as captured:
        result = func(*args, **kwargs)

    # Extract and format reasoning from logs
    if captured.stderr:
        # Look for reasoning sections
        log_text = captured.stderr

        # Find reasoning blocks
        reasoning_pattern = r'<reasoning>(.*?)</reasoning>'
        reasoning_matches = re.findall(reasoning_pattern, log_text, re.DOTALL)

        if reasoning_matches:
            display(HTML("<h3>🧠 Model Reasoning:</h3>"))
            for reasoning in reasoning_matches:
                display(HTML(f"""
                <div style='background-color: #f0f8ff; border-left: 4px solid #4169e1;
                            padding: 10px; margin: 10px 0; font-family: monospace;'>
                    {reasoning.strip().replace(chr(10), '<br>')}
                </div>
                """))

        # Show the full response
        display(HTML("<h3>📤 Response:</h3>"))
        display(HTML(f"<pre>{str(result)}</pre>"))

        # Optionally show full logs
        if os.environ.get('SHOW_FULL_LOGS', 'false').lower() == 'true':
            display(HTML("<details><summary>View Full BAML Logs</summary><pre style='font-size: 0.8em;'>" +
                         log_text + "</pre></details>"))

    return result

print("Enhanced reasoning visualization loaded! Use run_and_show_reasoning(main, 'your message') to see reasoning steps.")
'''

print("Notebook logging snippets created. Add these to the notebook generator.")
print("\nUsage in notebook:")
print("1. Add notebook_logging_cell after BAML setup")
print("2. Use: run_with_baml_logs(main, 'can you multiply 3 and 4')")
print("3. For reasoning section, use reasoning_logging_cell")

279  workshops/2025-07-16/hack/test_baml_logging.ipynb  (new file)
@@ -0,0 +1,279 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Testing BAML Logging in Jupyter\n",
    "\n",
    "This notebook demonstrates how to capture BAML logs in Jupyter notebook output."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# First, set up the environment\n",
    "import os\n",
    "import sys\n",
    "import subprocess\n",
    "import logging\n",
    "\n",
    "# Set BAML_LOG environment variable\n",
    "os.environ['BAML_LOG'] = 'info'\n",
    "print(f\"BAML_LOG is set to: {os.environ['BAML_LOG']}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Method 1: Configure Python logging to capture subprocess output\n",
    "import logging\n",
    "\n",
    "# Configure logging for Jupyter - force reconfiguration\n",
    "logging.basicConfig(\n",
    "    level=logging.INFO,\n",
    "    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n",
    "    force=True,  # This forces reconfiguration in Jupyter\n",
    "    handlers=[\n",
    "        logging.StreamHandler(sys.stdout)  # Output to stdout instead of stderr\n",
    "    ]\n",
    ")\n",
    "\n",
    "# Get logger for BAML\n",
    "logger = logging.getLogger('baml')\n",
    "logger.setLevel(logging.DEBUG)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Method 2: Monkey-patch print to capture BAML output\n",
    "import builtins\n",
    "from datetime import datetime\n",
    "\n",
    "# Store original print function\n",
    "_original_print = builtins.print\n",
    "\n",
    "def patched_print(*args, **kwargs):\n",
    "    \"\"\"Patched print function that adds timestamp and formatting.\"\"\"\n",
    "    # Check if this is a BAML log message\n",
    "    message = ' '.join(str(arg) for arg in args)\n",
    "    if '[baml' in message.lower() or 'baml' in message.lower():\n",
    "        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n",
    "        _original_print(f\"[{timestamp}] BAML LOG: {message}\", **kwargs)\n",
    "    else:\n",
    "        _original_print(*args, **kwargs)\n",
    "\n",
    "# Apply the patch\n",
    "builtins.print = patched_print"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Method 3: Use IPython's capture magic\n",
    "from IPython.utils.capture import capture_output\n",
    "\n",
    "def run_with_capture(func, *args, **kwargs):\n",
    "    \"\"\"Run a function and capture all its output.\"\"\"\n",
    "    with capture_output() as captured:\n",
    "        result = func(*args, **kwargs)\n",
    "\n",
    "    # Display captured output\n",
    "    if captured.stdout:\n",
    "        print(\"=== Standard Output ===\")\n",
    "        print(captured.stdout)\n",
    "\n",
    "    if captured.stderr:\n",
    "        print(\"\\n=== Error/Log Output ===\")\n",
    "        print(captured.stderr)\n",
    "\n",
    "    # Also display using IPython's display\n",
    "    from IPython.display import display, HTML\n",
    "    if captured.stderr:\n",
    "        display(HTML(f\"<pre style='background-color: #f0f0f0; padding: 10px;'>{captured.stderr}</pre>\"))\n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Method 4: Direct stderr/stdout redirection\n",
    "import io\n",
    "import contextlib\n",
    "\n",
    "@contextlib.contextmanager\n",
    "def capture_all_output():\n",
    "    \"\"\"Capture both stdout and stderr.\"\"\"\n",
    "    old_stdout = sys.stdout\n",
    "    old_stderr = sys.stderr\n",
    "\n",
    "    stdout_buffer = io.StringIO()\n",
    "    stderr_buffer = io.StringIO()\n",
    "\n",
    "    # Create a custom writer that writes to both the buffer and the notebook\n",
    "    class TeeWriter:\n",
    "        def __init__(self, buffer, original):\n",
    "            self.buffer = buffer\n",
    "            self.original = original\n",
    "\n",
    "        def write(self, data):\n",
    "            self.buffer.write(data)\n",
    "            self.original.write(data)\n",
    "            self.original.flush()  # Ensure immediate display\n",
    "\n",
    "        def flush(self):\n",
    "            self.buffer.flush()\n",
    "            self.original.flush()\n",
    "\n",
    "    try:\n",
    "        sys.stdout = TeeWriter(stdout_buffer, old_stdout)\n",
    "        sys.stderr = TeeWriter(stderr_buffer, old_stderr)\n",
    "        yield stdout_buffer, stderr_buffer\n",
    "    finally:\n",
    "        sys.stdout = old_stdout\n",
    "        sys.stderr = old_stderr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test function that would use BAML\n",
    "def test_baml_function():\n",
    "    print(\"Starting BAML test...\")\n",
    "    # This would be where your main() function runs\n",
    "    # For now, let's simulate some BAML-like output\n",
    "    print(\"[BAML] Initializing client...\")\n",
    "    print(\"[BAML] Running DetermineNextStep...\")\n",
    "    print(\"[BAML] Reasoning: User wants to multiply 3 and 4\")\n",
    "    print(\"[BAML] Result: MultiplyTool(a=3, b=4)\")\n",
    "    return \"Test completed\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test different methods\n",
    "print(\"Testing BAML logging capture methods...\\n\")\n",
    "\n",
    "# Method 1: Using IPython capture\n",
    "print(\"Method 1: IPython capture\")\n",
    "result = run_with_capture(test_baml_function)\n",
    "print(f\"Result: {result}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Method 2: Using context manager\n",
    "print(\"Method 2: Direct output capture\")\n",
    "with capture_all_output() as (stdout, stderr):\n",
    "    result = test_baml_function()\n",
    "print(f\"Result: {result}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## For actual BAML usage\n",
    "\n",
    "When using actual BAML functions, the logs should appear if:\n",
    "\n",
    "1. `BAML_LOG` environment variable is set (we set it to 'info')\n",
    "2. You're using one of the capture methods above\n",
    "\n",
    "Example with real BAML:\n",
    "\n",
    "```python\n",
    "# Assuming you have your main() function from the workshop\n",
    "os.environ['BAML_LOG'] = 'info'\n",
    "\n",
    "# Use IPython capture\n",
    "with capture_output() as captured:\n",
    "    main(\"can you multiply 3 and 4\")\n",
    "\n",
    "# Display the logs\n",
    "print(\"=== Output ===\")\n",
    "print(captured.stdout)\n",
    "print(\"\\n=== Logs ===\")\n",
    "print(captured.stderr)\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Method 5: Enable verbose subprocess output\n",
    "# This is useful if BAML is running as a subprocess\n",
    "\n",
    "def run_baml_with_logging():\n",
    "    # Ensure BAML_LOG is set\n",
    "    env = os.environ.copy()\n",
    "    env['BAML_LOG'] = 'info'\n",
    "\n",
    "    # If BAML runs as subprocess, capture its output\n",
    "    result = subprocess.run(\n",
    "        ['python', '-c', 'print(\"BAML subprocess test\")'],\n",
    "        env=env,\n",
    "        capture_output=True,\n",
    "        text=True\n",
    "    )\n",
    "\n",
    "    print(\"Subprocess stdout:\", result.stdout)\n",
    "    print(\"Subprocess stderr:\", result.stderr)\n",
    "\n",
    "run_baml_with_logging()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
@@ -82,12 +82,111 @@ def get_baml_client():
    # Third cell: Initialize BAML
    init_code = "!baml-cli init"
    nb.cells.append(new_code_cell(init_code))

    # Fourth cell: BAML logging helper for Jupyter
    logging_setup = '''# Enable BAML logging capture in Jupyter
import os
import sys
from IPython.utils.capture import capture_output

def process_step(nb, step, base_path, current_functions):

# Set BAML logging level
os.environ['BAML_LOG'] = 'info'

# Helper function to run code with BAML log capture
def run_with_baml_logs(func, *args, **kwargs):
    """Run a function and display BAML logs in the notebook."""
    print(f"Running with BAML_LOG={os.environ.get('BAML_LOG')}...")

    # Capture all output
    with capture_output() as captured:
        result = func(*args, **kwargs)

    # Display the result first
    if result is not None:
        print("=== Result ===")
        print(result)

    # Display captured stdout
    if captured.stdout:
        print("\\n=== Output ===")
        print(captured.stdout)

    # Display BAML logs from stderr
    if captured.stderr:
        print("\\n=== BAML Logs ===")
        # Format the logs for better readability
        log_lines = captured.stderr.strip().split('\\n')
        for line in log_lines:
            if 'reasoning' in line.lower() or '<reasoning>' in line:
                print(f"🤔 {line}")
            elif 'error' in line.lower():
                print(f"❌ {line}")
            elif 'warn' in line.lower():
                print(f"⚠️ {line}")
            else:
                print(f"  {line}")

    return result

print("BAML logging helper loaded! Use run_with_baml_logs(main, 'your message') to see logs.")
'''
    nb.cells.append(new_code_cell(logging_setup))


def process_step(nb, step, base_path, current_functions, section_name=None):
    """Process different step types."""
    if 'text' in step:
        # Add markdown cell
        nb.cells.append(new_markdown_cell(step['text']))

        # Special handling for reasoning section
        if section_name == 'customize-prompt' and 'reasoning in action' in step['text']:
            # Add enhanced reasoning visualization after the text
            reasoning_viz = '''# Enhanced logging for reasoning visualization
import re
from IPython.display import display, HTML

def run_and_show_reasoning(func, *args, **kwargs):
    """Run a function and highlight the reasoning steps from BAML logs."""
    from IPython.utils.capture import capture_output

    with capture_output() as captured:
        result = func(*args, **kwargs)

    # Extract and format reasoning from logs
    if captured.stderr:
        # Look for reasoning sections in the logs
        log_text = captured.stderr

        # Find reasoning blocks
        reasoning_pattern = r'<reasoning>(.*?)</reasoning>'
        reasoning_matches = re.findall(reasoning_pattern, log_text, re.DOTALL)

        if reasoning_matches:
            display(HTML("<h3>🧠 Model Reasoning:</h3>"))
            for reasoning in reasoning_matches:
                display(HTML(f"""
                <div style='background-color: #f0f8ff; border-left: 4px solid #4169e1;
                            padding: 10px; margin: 10px 0; font-family: monospace;'>
                    {reasoning.strip().replace(chr(10), '<br>')}
                </div>
                """))

        # Show the result
        display(HTML("<h3>📤 Response:</h3>"))
        display(HTML(f"<pre>{str(result)}</pre>"))

        # Optionally show full logs
        display(HTML("""<details><summary>View Full BAML Logs</summary>
<pre style='font-size: 0.8em; background-color: #f5f5f5; padding: 10px;'>""" +
                     log_text.replace('<', '&lt;').replace('>', '&gt;') +
                     "</pre></details>"))

    return result

print("Enhanced reasoning visualization loaded! Use:")
print("run_and_show_reasoning(main, 'can you multiply 3 and 4')")
'''
            nb.cells.append(new_code_cell(reasoning_viz))

    if 'baml_setup' in step:
        # Add BAML setup cells
@@ -158,10 +257,18 @@ def process_step(nb, step, base_path, current_functions):
            call_parts.append(f'{key}={value}')

    # Generate the function call
    if call_parts:
        nb.cells.append(new_code_cell(f'main({", ".join(call_parts)})'))
    main_call = f'main({", ".join(call_parts)})' if call_parts else "main()"

    # Use different wrappers based on section
    if section_name == 'customize-prompt':
        # Use enhanced reasoning visualization for this section
        nb.cells.append(new_code_cell(f'run_and_show_reasoning({main_call})'))
    elif section_name in ['cli-and-agent', 'calculator-tools', 'tool-loop']:
        # Use basic logging wrapper for these sections
        nb.cells.append(new_code_cell(f'run_with_baml_logs({main_call})'))
    else:
        nb.cells.append(new_code_cell("main()"))
        # Default to plain call
        nb.cells.append(new_code_cell(main_call))


def convert_walkthrough_to_notebook(yaml_path, output_path):
    """Convert walkthrough.yaml to Jupyter notebook."""
@@ -187,6 +294,7 @@ def convert_walkthrough_to_notebook(yaml_path, output_path):
    for section in walkthrough.get('sections', []):
        # Add section title
        section_title = section.get('title', section.get('name', 'Section'))
        section_name = section.get('name', '')
        nb.cells.append(new_markdown_cell(f"## {section_title}"))

        # Add section description
@@ -195,7 +303,7 @@ def convert_walkthrough_to_notebook(yaml_path, output_path):

        # Process steps
        for step in section.get('steps', []):
            process_step(nb, step, base_path, current_functions)
            process_step(nb, step, base_path, current_functions, section_name)

    # Write notebook
    with open(output_path, 'w') as f:

@@ -1,9 +0,0 @@
async function hello(): Promise<void> {
    console.log('hello, world!')
}

async function main() {
    await hello()
}

main().catch(console.error)

@@ -1,32 +0,0 @@
import { b } from "../baml_client";

// tool call or a respond to human tool
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;

export interface Event {
    type: string
    data: any;
}

export class Thread {
    events: Event[] = [];

    constructor(events: Event[]) {
        this.events = events;
    }

    serializeForLLM() {
        // can change this to whatever custom serialization you want to do, XML, etc
        // e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
        return JSON.stringify(this.events);
    }
}

// right now this just runs one turn with the LLM, but
// we'll update this function to handle all the agent logic
export async function agentLoop(thread: Thread): Promise<AgentResponse> {
    const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
    return nextStep;
}

@@ -1,23 +0,0 @@
// cli.ts lets you invoke the agent loop from the command line

import { agentLoop, Thread, Event } from "./agent";

export async function cli() {
    // Get command line arguments, skipping the first two (node and script name)
    const args = process.argv.slice(2);

    if (args.length === 0) {
        console.error("Error: Please provide a message as a command line argument");
        process.exit(1);
    }

    // Join all arguments into a single message
    const message = args.join(" ");

    // Create a new thread with the user's message as the initial event
    const thread = new Thread([{ type: "user_input", data: message }]);

    // Run the agent loop with the thread
    const result = await agentLoop(thread);
    console.log(result);
}

@@ -1,11 +0,0 @@
import { cli } from "./cli"

async function hello(): Promise<void> {
    console.log('hello, world!')
}

async function main() {
    await cli()
}

main().catch(console.error)

@@ -1,55 +0,0 @@
import { b } from "../baml_client";

// tool call or a respond to human tool
type AgentResponse = Awaited<ReturnType<typeof b.DetermineNextStep>>;

export interface Event {
    type: string
    data: any;
}

export class Thread {
    events: Event[] = [];

    constructor(events: Event[]) {
        this.events = events;
    }

    serializeForLLM() {
        // can change this to whatever custom serialization you want to do, XML, etc
        // e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
        return JSON.stringify(this.events);
    }
}


export async function agentLoop(thread: Thread): Promise<string> {

    while (true) {
        const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
        console.log("nextStep", nextStep);

        switch (nextStep.intent) {
            case "done_for_now":
                // response to human, return the next step object
                return nextStep.message;
            case "add":
                thread.events.push({
                    "type": "tool_call",
                    "data": nextStep
                });
                const result = nextStep.a + nextStep.b;
                console.log("tool_response", result);
                thread.events.push({
                    "type": "tool_response",
                    "data": result
                });
                continue;
            default:
                throw new Error(`Unknown intent: ${nextStep.intent}`);
        }
    }
}

@@ -1,87 +0,0 @@
import { AddTool, SubtractTool, DivideTool, MultiplyTool, b } from "../baml_client";

export interface Event {
    type: string
    data: any;
}

export class Thread {
    events: Event[] = [];

    constructor(events: Event[]) {
        this.events = events;
    }

    serializeForLLM() {
        // can change this to whatever custom serialization you want to do, XML, etc
        // e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
        return JSON.stringify(this.events);
    }
}

export type CalculatorTool = AddTool | SubtractTool | MultiplyTool | DivideTool;

export async function handleNextStep(nextStep: CalculatorTool, thread: Thread): Promise<Thread> {
    let result: number;
    switch (nextStep.intent) {
        case "add":
            result = nextStep.a + nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
        case "subtract":
            result = nextStep.a - nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
        case "multiply":
            result = nextStep.a * nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
        case "divide":
            result = nextStep.a / nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
    }
}

export async function agentLoop(thread: Thread): Promise<Thread> {

    while (true) {
        const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
        console.log("nextStep", nextStep);

        thread.events.push({
            "type": "tool_call",
            "data": nextStep
        });

        switch (nextStep.intent) {
            case "done_for_now":
            case "request_more_information":
                // response to human, return the thread
                return thread;
            case "add":
            case "subtract":
            case "multiply":
            case "divide":
                thread = await handleNextStep(nextStep, thread);
        }
    }
}

@@ -1,50 +0,0 @@
// cli.ts lets you invoke the agent loop from the command line

import { agentLoop, Thread, Event } from "../src/agent";


export async function cli() {
    // Get command line arguments, skipping the first two (node and script name)
    const args = process.argv.slice(2);

    if (args.length === 0) {
        console.error("Error: Please provide a message as a command line argument");
        process.exit(1);
    }

    // Join all arguments into a single message
    const message = args.join(" ");

    // Create a new thread with the user's message as the initial event
    const thread = new Thread([{ type: "user_input", data: message }]);

    // Run the agent loop with the thread
    const result = await agentLoop(thread);
    let lastEvent = result.events.slice(-1)[0];

    while (lastEvent.data.intent === "request_more_information") {
        const message = await askHuman(lastEvent.data.message);
        thread.events.push({ type: "human_response", data: message });
        const result = await agentLoop(thread);
        lastEvent = result.events.slice(-1)[0];
    }

    // print the final result
    // optional - you could loop here too
    console.log(lastEvent.data.message);
    process.exit(0);
}

async function askHuman(message: string) {
    const readline = require('readline').createInterface({
        input: process.stdin,
        output: process.stdout
    });

    return new Promise((resolve) => {
        readline.question(`${message}\n> `, (answer: string) => {
            resolve(answer);
        });
    });
}

@@ -12,6 +12,7 @@ class Thread:

    def serialize_as_xml(self):
        """Serialize thread events to XML format for better token efficiency."""
        import yaml
        xml_parts = ["<thread>"]

        for event in self.events:
@@ -21,11 +22,11 @@ class Thread:
            if event_type == 'user_input':
                xml_parts.append(f'    <user_input>{event_data}</user_input>')
            elif event_type == 'tool_call':
                xml_parts.append(f'    <tool_call>')
                xml_parts.append(f'        <tool>{event_data["tool"]}</tool>')
                xml_parts.append(f'        <operation>{event_data["operation"]}</operation>')
                xml_parts.append(f'        <result>{event_data["result"]}</result>')
                xml_parts.append(f'    </tool_call>')
                # Use YAML for tool call args - more compact than nested XML
                yaml_content = yaml.dump(event_data, default_flow_style=False).strip()
                xml_parts.append(f'    <{event_data["tool"]}>')
                xml_parts.append('        ' + '\n        '.join(yaml_content.split('\n')))
                xml_parts.append(f'    </{event_data["tool"]}>')
            elif event_type == 'clarification_request':
                xml_parts.append(f'    <clarification_request>{event_data}</clarification_request>')
            elif event_type == 'clarification_response':

@@ -1,87 +0,0 @@
import { AddTool, SubtractTool, DivideTool, MultiplyTool, b } from "../baml_client";

export interface Event {
    type: string
    data: any;
}

export class Thread {
    events: Event[] = [];

    constructor(events: Event[]) {
        this.events = events;
    }

    serializeForLLM() {
        // can change this to whatever custom serialization you want to do, XML, etc
        // e.g. https://github.com/got-agents/agents/blob/59ebbfa236fc376618f16ee08eb0f3bf7b698892/linear-assistant-ts/src/agent.ts#L66-L105
        return JSON.stringify(this.events, null, 2);
    }
}

export type CalculatorTool = AddTool | SubtractTool | MultiplyTool | DivideTool;

export async function handleNextStep(nextStep: CalculatorTool, thread: Thread): Promise<Thread> {
    let result: number;
    switch (nextStep.intent) {
        case "add":
            result = nextStep.a + nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
        case "subtract":
            result = nextStep.a - nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
        case "multiply":
            result = nextStep.a * nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
        case "divide":
            result = nextStep.a / nextStep.b;
            console.log("tool_response", result);
            thread.events.push({
                "type": "tool_response",
                "data": result
            });
            return thread;
    }
}

export async function agentLoop(thread: Thread): Promise<Thread> {

    while (true) {
        const nextStep = await b.DetermineNextStep(thread.serializeForLLM());
        console.log("nextStep", nextStep);

        thread.events.push({
            "type": "tool_call",
            "data": nextStep
        });

        switch (nextStep.intent) {
            case "done_for_now":
            case "request_more_information":
                // response to human, return the thread
                return thread;
            case "add":
            case "subtract":
            case "multiply":
            case "divide":
                thread = await handleNextStep(nextStep, thread);
        }
    }
}