Mirror of https://github.com/MadcowD/ell.git, synced 2024-09-22 16:14:36 +03:00

Commit: lmp documentation
docs/Makefile (new file, 20 lines)
@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/conf.py (new file, 27 lines)
@@ -0,0 +1,27 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = 'ell'
copyright = '2024, William Guss'
author = 'William Guss'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

# Add autodoc and napoleon to the extensions
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']

templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

html_theme = "pydata_sphinx_theme"

# Add the following lines if not already present
import os
import sys
sys.path.insert(0, os.path.abspath('../src'))
docs/index.rst (new file, 40 lines)
@@ -0,0 +1,40 @@
.. ell documentation master file, created by
   sphinx-quickstart on Thu Aug 29 13:45:32 2024.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

ell documentation
=================

Welcome to the documentation for ``ell``, a lightweight, functional prompt engineering framework.

Overview
--------

``ell`` is built on a few core principles:

1. **Prompts are programs, not strings**: In ``ell``, we think of using a language model as a discrete subroutine called a **language model program** (LMP).

2. **Prompts are parameters of a machine learning model**: ``ell`` treats prompts as learnable parameters.

3. **Every call to a language model is valuable**: ``ell`` emphasizes the importance of each language model interaction.

Key Features
------------

- Functional approach to prompt engineering
- Visualization and tracking of prompts using ``ell-studio``
- Support for various storage backends (SQLite, PostgreSQL)
- Integration with popular language models

For installation instructions, usage examples, and contribution guidelines, please refer to the project's GitHub repository.

Contents
--------

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   ell
   reference/index
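To make the "prompts are programs" overview above concrete, here is a minimal LMP sketch in the style this commit exports from src/ell/__init__.py (the model id and prompt text are illustrative, not part of the committed docs):

    import ell

    @ell.simple(model="gpt-4o-mini")  # illustrative; any registered model id works
    def greet(name: str) -> str:
        '''You are a friendly assistant.'''         # docstring becomes the system prompt
        return f"Say hello to {name} in one line."  # return value becomes the user prompt

    print(greet("world"))  # @ell.simple returns plain text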
docs/make.bat (new file, 35 lines)
@@ -0,0 +1,35 @@
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

if "%1" == "" goto help

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
@@ -2,7 +2,7 @@ import dataclasses
import random
from typing import Callable, List, Tuple
import ell
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
ell.config.verbose = True
docs/reference/ell.lmp.rst (new file, 7 lines)
@@ -0,0 +1,7 @@
ell.lmp
=======

.. automodule:: ell.lmp
   :members:
   :undoc-members:
   :show-inheritance:
docs/reference/ell.models.rst (new file, 7 lines)
@@ -0,0 +1,7 @@
ell.models
==========

.. automodule:: ell.models
   :members:
   :undoc-members:
   :show-inheritance:
docs/reference/ell.stores.rst (new file, 7 lines)
@@ -0,0 +1,7 @@
ell.stores
==========

.. automodule:: ell.stores
   :members:
   :undoc-members:
   :show-inheritance:
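A short sketch of the backend this stub page documents, mirroring the usage example committed in src/ell/__init__.py further down in this diff (the storage directory name is illustrative):

    from ell.stores.sql import SQLiteStore

    store = SQLiteStore('my_project')  # persists LMP versions and invocations on disk
    store.install(autocommit=True)     # hooks the store into ell's global config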
docs/reference/ell.types.rst (new file, 124 lines)
@@ -0,0 +1,124 @@
ell.types
=========

.. automodule:: ell.types
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

ell.types.lmp
-------------

.. automodule:: ell.types.lmp
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.SerializedLMPUses
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.SerializedLMPBase
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.SerializedLMP
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.InvocationTrace
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.InvocationBase
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.InvocationContentsBase
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.InvocationContents
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.lmp.Invocation
   :members:
   :undoc-members:
   :show-inheritance:

ell.types.message
-----------------

.. automodule:: ell.types.message
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.message.ToolResult
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.message.ToolCall
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.message.ContentBlock
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: ell.types.message.Message
   :members:
   :undoc-members:
   :show-inheritance:

.. autofunction:: ell.types.message.system

.. autofunction:: ell.types.message.user

.. autofunction:: ell.types.message.assistant

.. autofunction:: ell.types.message.coerce_content_list

Type Aliases
------------

.. py:data:: ell.types.message.InvocableTool
   :annotation: = Callable[..., Union[ToolResult, _lstr_generic, List[ContentBlock]]]

.. py:data:: ell.types.message.LMPParams
   :annotation: = Dict[str, Any]

.. py:data:: ell.types.message.MessageOrDict
   :annotation: = Union[Message, Dict[str, str]]

.. py:data:: ell.types.message.Chat
   :annotation: = List[Message]

.. py:data:: ell.types.message.MultiTurnLMP
   :annotation: = Callable[..., Chat]

.. py:data:: ell.types.message.OneTurn
   :annotation: = Callable[..., _lstr_generic]

.. py:data:: ell.types.message.ChatLMP
   :annotation: = Callable[[Chat, Any], Chat]

.. py:data:: ell.types.message.LMP
   :annotation: = Union[OneTurn, MultiTurnLMP, ChatLMP]

.. py:data:: ell.types.message.InvocableLM
   :annotation: = Callable[..., _lstr_generic]
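A sketch of how the message helpers and aliases above compose in user code; the Message(...) constructor call with a string content is an assumption based on coerce_content_list, and .text is the property documented under ell.lmp.complex below:

    from typing import List
    from ell.types.message import Message, system, user

    # Chat is documented above as List[Message]; a ChatLMP maps a Chat
    # (plus extra arguments) to a new Chat.
    def echo_bot(chat: List[Message]) -> List[Message]:
        last = chat[-1].text if chat else ""
        return chat + [Message(role="assistant", content=last)]

    conversation = [system("You repeat the user."), user("hello")]
    print(echo_bot(conversation)[-1].text)  # -> "hello"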
docs/reference/ell.util.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
ell.util
========

.. automodule:: ell.util
   :members:
   :undoc-members:
   :show-inheritance:

.. autofunction:: ell.util.lexically_closured_source
docs/reference/index.rst (new file, 16 lines)
@@ -0,0 +1,16 @@
.. automodule:: ell
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

.. toctree::
   :maxdepth: 4

   ell.lmp
   ell.models
   ell.stores
   ell.types
   ell.util
@@ -7,7 +7,7 @@ import openai
import ell.lmp.tool
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
from ell.stores.sql import SQLiteStore

ell.config.verbose = True
poetry.lock (generated, 261 lines changed)
@@ -1,5 +1,16 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

[[package]]
name = "alabaster"
version = "0.7.16"
description = "A light, configurable Sphinx theme"
optional = false
python-versions = ">=3.9"
files = [
    {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"},
    {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"},
]

[[package]]
name = "annotated-types"
version = "0.7.0"
@@ -52,6 +63,20 @@ tests = ["attrs[tests-no-zope]", "zope-interface"]
tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]

[[package]]
name = "babel"
version = "2.16.0"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.8"
files = [
    {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"},
    {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"},
]

[package.extras]
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]

[[package]]
name = "black"
version = "24.8.0"
@@ -304,6 +329,17 @@ idna = ["idna (>=3.6)"]
trio = ["trio (>=0.23)"]
wmi = ["wmi (>=1.5.1)"]

[[package]]
name = "docutils"
version = "0.20.1"
description = "Docutils -- Python Documentation Utilities"
optional = false
python-versions = ">=3.7"
files = [
    {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"},
    {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"},
]

[[package]]
name = "email-validator"
version = "2.2.0"
@@ -561,6 +597,36 @@ files = [
    {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
]

[[package]]
name = "imagesize"
version = "1.4.1"
description = "Getting image size from png/jpeg/jpeg2000/gif file"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
    {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
    {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
]

[[package]]
name = "importlib-metadata"
version = "8.4.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
    {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
    {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
]

[package.dependencies]
zipp = ">=0.5"

[package.extras]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]

[[package]]
name = "iniconfig"
version = "2.0.0"
@@ -1321,6 +1387,180 @@ files = [
    {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]

[[package]]
name = "snowballstemmer"
version = "2.2.0"
description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
optional = false
python-versions = "*"
files = [
    {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
    {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
]

[[package]]
name = "sphinx"
version = "7.4.7"
description = "Python documentation generator"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"},
    {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"},
]

[package.dependencies]
alabaster = ">=0.7.14,<0.8.0"
babel = ">=2.13"
colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""}
docutils = ">=0.20,<0.22"
imagesize = ">=1.3"
importlib-metadata = {version = ">=6.0", markers = "python_version < \"3.10\""}
Jinja2 = ">=3.1"
packaging = ">=23.0"
Pygments = ">=2.17"
requests = ">=2.30.0"
snowballstemmer = ">=2.2"
sphinxcontrib-applehelp = "*"
sphinxcontrib-devhelp = "*"
sphinxcontrib-htmlhelp = ">=2.0.0"
sphinxcontrib-jsmath = "*"
sphinxcontrib-qthelp = "*"
sphinxcontrib-serializinghtml = ">=1.1.9"
tomli = {version = ">=2", markers = "python_version < \"3.11\""}

[package.extras]
docs = ["sphinxcontrib-websupport"]
lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"]
test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"]

[[package]]
name = "sphinx-rtd-theme"
version = "2.0.0"
description = "Read the Docs theme for Sphinx"
optional = false
python-versions = ">=3.6"
files = [
    {file = "sphinx_rtd_theme-2.0.0-py2.py3-none-any.whl", hash = "sha256:ec93d0856dc280cf3aee9a4c9807c60e027c7f7b461b77aeffed682e68f0e586"},
    {file = "sphinx_rtd_theme-2.0.0.tar.gz", hash = "sha256:bd5d7b80622406762073a04ef8fadc5f9151261563d47027de09910ce03afe6b"},
]

[package.dependencies]
docutils = "<0.21"
sphinx = ">=5,<8"
sphinxcontrib-jquery = ">=4,<5"

[package.extras]
dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"]

[[package]]
name = "sphinxcontrib-applehelp"
version = "2.0.0"
description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"},
    {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-devhelp"
version = "2.0.0"
description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"},
    {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-htmlhelp"
version = "2.1.0"
description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"},
    {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["html5lib", "pytest"]

[[package]]
name = "sphinxcontrib-jquery"
version = "4.1"
description = "Extension to include jQuery on newer Sphinx releases"
optional = false
python-versions = ">=2.7"
files = [
    {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"},
    {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"},
]

[package.dependencies]
Sphinx = ">=1.8"

[[package]]
name = "sphinxcontrib-jsmath"
version = "1.0.1"
description = "A sphinx extension which renders display math in HTML via JavaScript"
optional = false
python-versions = ">=3.5"
files = [
    {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
    {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
]

[package.extras]
test = ["flake8", "mypy", "pytest"]

[[package]]
name = "sphinxcontrib-qthelp"
version = "2.0.0"
description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"},
    {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["defusedxml (>=0.7.1)", "pytest"]

[[package]]
name = "sphinxcontrib-serializinghtml"
version = "2.0.0"
description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"},
    {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["pytest"]

[[package]]
name = "sqlalchemy"
version = "2.0.31"
@@ -1755,6 +1995,25 @@ files = [
    {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
]

[[package]]
name = "zipp"
version = "3.20.1"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
    {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"},
    {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"},
]

[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]

[extras]
npm-build = []
npm-install = []
@@ -1762,4 +2021,4 @@ npm-install = []
[metadata]
lock-version = "2.0"
python-versions = ">=3.9"
-content-hash = "2d427545284e05bec0149a8b29f48320d5fb6740fbb1a6ba9bdc9c512cc4c8f6"
+content-hash = "7420f059f6878a9303f46e7a1d91b617c4c7657b315279680d51e04c1425c102"
@@ -43,6 +43,8 @@ pillow = "^10.4.0"
[tool.poetry.group.dev.dependencies]
pytest = "^8.3.2"

sphinx = "<8.0.0"
sphinx-rtd-theme = "^2.0.0"

[build-system]
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
build-backend = "poetry_dynamic_versioning.backend"
@@ -1,12 +1,79 @@
"""
ell: A Lightweight, Functional Prompt Engineering Framework
===========================================================

ell is a Python library that provides a novel approach to prompt engineering
and interaction with language models. It is built on the principles that
prompts are programs, not just strings, and that every interaction with a
language model is valuable.

Key Features:
-------------
1. Functional approach to prompt engineering
2. Visualization, tracking, and versioning of prompts using ell-studio
3. Support for various storage backends (SQLite, PostgreSQL)
4. Integration with popular language models

Main Components:
----------------
- Language Model Programs (LMPs): Discrete subroutines for interacting with language models
- Storage backends: For persisting and analyzing interactions
- ell-studio: A visualization tool for tracking prompt engineering processes

Example Usage:
--------------
Here's a simple example of how to use ell:

    import ell

    @ell.simple(model="gpt-4")
    def generate_poem(topic: str):
        '''You are a helpful assistant that writes poems.'''
        return f"Write a short poem about {topic}."

    # Use SQLite as the storage backend
    store = ell.SQLiteStore('my_project')
    store.install(autocommit=True)

    # Generate a poem
    poem = generate_poem("autumn leaves")
    print(poem)

    # Visualize your prompt engineering process
    # Run in terminal: python -m ell.studio --storage-dir ./my_project

For more detailed information, refer to the specific module documentation:
- ell.lmp: Language Model Program decorators and utilities
- ell.types: Type definitions for messages and content blocks
- ell.models: Supported language models
- ell.configurator: Configuration utilities
"""

from ell.lmp.simple import simple
from ell.lmp.tool import tool
from ell.lmp.complex import complex
from ell.types.message import Message, ContentBlock, system, user, assistant
from ell.__version__ import __version__

# Registers all of the models.
# Import all models
import ell.models

# Import everything from configurator
from ell.configurator import *

__all__ = [
    'simple',
    'tool',
    'complex',
    'Message',
    'ContentBlock',
    'system',
    'user',
    'assistant',
    '__version__',
]
@@ -1,10 +1,10 @@
import logging
import threading
from ell.types import SerializedLMP, Invocation, InvocationTrace, InvocationContents
-from ell.types.lmp import LMPType, utc_now
+from ell.types.studio import LMPType, utc_now
import ell.util.closure
from ell.configurator import config
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr

import inspect
@@ -1,9 +1,9 @@
from ell.configurator import config
from ell.lmp._track import _track
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
from ell.types import Message, ContentBlock
from ell.types.message import LMP, InvocableLM, LMPParams, MessageOrDict, _lstr_generic
-from ell.types.lmp import LMPType
+from ell.types.studio import LMPType
from ell.util._warnings import _warnings
from ell.util.api import call
from ell.util.verbosity import compute_color, model_usage_logger_pre
@@ -16,9 +16,167 @@ from typing import Any, Dict, Optional, List, Callable, Union
def complex(model: str, client: Optional[openai.Client] = None, exempt_from_tracking=False, tools: Optional[List[Callable]] = None, post_callback: Optional[Callable] = None, **api_params):
    """
-   Defines a basic language model program (a parameterization of an existing foundation model using a particular prompt.)
    A sophisticated language model programming decorator for complex LLM interactions.

-   This is a decorator that can be applied to any LMP type.
    This decorator transforms a function into a Language Model Program (LMP) capable of handling
    multi-turn conversations, tool usage, and various output formats. It's designed for advanced
    use cases where full control over the LLM's capabilities is needed.

    Args:
        model (str): The name or identifier of the language model to use.
        client (Optional[openai.Client]): An optional OpenAI client instance. If not provided, a default client will be used.
        tools (Optional[List[Callable]]): A list of tool functions that can be used by the LLM. Only available for certain models.
        response_format (Optional[Dict[str, Any]]): The response format for the LLM. Only available for certain models.
        n (Optional[int]): The number of responses to generate for the LLM. Only available for certain models.
        temperature (Optional[float]): The temperature parameter for controlling the randomness of the LLM.
        max_tokens (Optional[int]): The maximum number of tokens to generate for the LLM.
        top_p (Optional[float]): The top-p sampling parameter for controlling the diversity of the LLM.
        frequency_penalty (Optional[float]): The frequency penalty parameter for controlling the LLM's repetition.
        presence_penalty (Optional[float]): The presence penalty parameter for controlling the LLM's relevance.
        stop (Optional[List[str]]): The stop sequence for the LLM.
        exempt_from_tracking (bool): If True, the LMP usage won't be tracked. Default is False.
        **api_params: Additional keyword arguments to pass to the underlying API call.

    Returns:
        Callable: A decorator that can be applied to a function, transforming it into a complex LMP.

    Functionality:
    1. Advanced LMP Creation:
       - Supports multi-turn conversations and stateful interactions.
       - Enables tool usage within the LLM context.
       - Allows for various output formats, including structured data and function calls.

    2. Flexible Input Handling:
       - Can process both single prompts and conversation histories.
       - Supports multimodal inputs (text, images, etc.) in the prompt.

    3. Comprehensive Integration:
       - Integrates with ell's tracking system for monitoring LMP versions, usage, and performance.
       - Supports various language models and API configurations.

    4. Output Processing:
       - Can return raw LLM outputs or process them through a post-callback function.
       - Supports returning multiple message types (e.g., text, function calls, tool results).

    Usage Modes and Examples:

    1. Basic Prompt:
        @ell.complex(model="gpt-4")
        def generate_story(prompt: str) -> List[Message]:
            '''You are a creative story writer'''  # System prompt
            return [
                ell.user(f"Write a short story based on this prompt: {prompt}")
            ]

        story: ell.Message = generate_story("A robot discovers emotions")
        print(story.text)  # Access the text content of the last message

    2. Multi-turn Conversation:
        @ell.complex(model="gpt-4")
        def chat_bot(message_history: List[Message]) -> List[Message]:
            return [
                ell.system("You are a helpful assistant."),
            ] + message_history

        conversation = [
            ell.user("Hello, who are you?"),
            ell.assistant("I'm an AI assistant. How can I help you today?"),
            ell.user("Can you explain quantum computing?")
        ]
        response: ell.Message = chat_bot(conversation)
        print(response.text)  # Print the assistant's response

    3. Tool Usage:
        @ell.tool()
        def get_weather(location: str) -> str:
            # Implementation to fetch weather
            return f"The weather in {location} is sunny."

        @ell.complex(model="gpt-4", tools=[get_weather])
        def weather_assistant(message_history: List[Message]) -> List[Message]:
            return [
                ell.system("You are a weather assistant. Use the get_weather tool when needed."),
            ] + message_history

        conversation = [
            ell.user("What's the weather like in New York?")
        ]
        response: ell.Message = weather_assistant(conversation)

        if response.tool_calls:
            tool_results = response.call_tools_and_collect_as_message()
            print("Tool results:", tool_results.text)

            # Continue the conversation with tool results
            final_response = weather_assistant(conversation + [response, tool_results])
            print("Final response:", final_response.text)

    4. Structured Output:
        from pydantic import BaseModel

        class PersonInfo(BaseModel):
            name: str
            age: int

        @ell.complex(model="gpt-4", response_format=PersonInfo)
        def extract_person_info(text: str) -> List[Message]:
            return [
                ell.system("Extract person information from the given text."),
                ell.user(text)
            ]

        text = "John Doe is a 30-year-old software engineer."
        result: ell.Message = extract_person_info(text)
        person_info = result.parsed_content[0]
        print(f"Name: {person_info.name}, Age: {person_info.age}")

    5. Multimodal Input:
        @ell.complex(model="gpt-4-vision-preview")
        def describe_image(image: PIL.Image.Image) -> List[Message]:
            return [
                ell.system("Describe the contents of the image in detail."),
                ell.user([
                    ContentBlock(text="What do you see in this image?"),
                    ContentBlock(image=image)
                ])
            ]

        image = PIL.Image.open("example.jpg")
        description = describe_image(image)
        print(description.text)

    6. Parallel Tool Execution:
        @ell.complex(model="gpt-4", tools=[tool1, tool2, tool3])
        def parallel_assistant(message_history: List[Message]) -> List[Message]:
            return [
                ell.system("You can use multiple tools in parallel."),
            ] + message_history

        response = parallel_assistant([ell.user("Perform tasks A, B, and C simultaneously.")])
        if response.tool_calls:
            tool_results: ell.Message = response.call_tools_and_collect_as_message(parallel=True, max_workers=3)
            print("Parallel tool results:", tool_results.text)

    Helper Functions for Output Processing:
    - response.text: Get the full text content of the last message.
    - response.text_only: Get only the text content, excluding non-text elements.
    - response.tool_calls: Access the list of tool calls in the message.
    - response.tool_results: Access the list of tool results in the message.
    - response.parsed_content: Access structured data outputs.
    - response.call_tools_and_collect_as_message(): Execute tool calls and collect results.
    - Message(role="user", content=[...]).to_openai_message(): Convert to OpenAI API format.

    Notes:
    - The decorated function should return a list of Message objects.
    - For tool usage, ensure that tools are properly decorated with @ell.tool().
    - When using structured outputs, specify the response_format in the decorator.
    - The complex decorator supports all features of simpler decorators like @ell.simple.
    - Use helper functions and properties to easily access and process different types of outputs.

    See Also:
    - ell.simple: For simpler text-only LMP interactions.
    - ell.tool: For defining tools that can be used within complex LMPs.
    - ell.studio: For visualizing and analyzing LMP executions.
    """
    default_client_from_decorator = client
@@ -81,5 +239,5 @@ def _get_messages(prompt_ret: Union[str, list[MessageOrDict]], prompt: LMP) -> l
    else:
        assert isinstance(
            prompt_ret, list
-       ), "Need to pass a list of MessagesOrDict to the language model"
+       ), "Need to pass a list of Messages to the language model"
        return prompt_ret
@@ -5,9 +5,93 @@ import openai
from ell.lmp.complex import complex


@wraps(complex)
def simple(model: str, client: Optional[openai.Client] = None, exempt_from_tracking=False, **api_params):
-   """a basic language model programming decorator for text only llm prompting."""
    """
    A basic language model programming decorator for text-only LLM outputs.

    This decorator simplifies the process of creating Language Model Programs (LMPs)
    that return text-only outputs from language models, while supporting multimodal inputs.
    It wraps the more complex 'complex' decorator, providing a streamlined interface for common use cases.

    Args:
        model (str): The name or identifier of the language model to use.
        client (Optional[openai.Client]): An optional OpenAI client instance. If not provided, a default client will be used.
        exempt_from_tracking (bool): If True, the LMP usage won't be tracked. Default is False.
        **api_params: Additional keyword arguments to pass to the underlying API call.

    Returns:
        Callable: A decorator that can be applied to a function, transforming it into an LMP.

    Functionality:
    1. Simplifies LMP Creation:
       - Provides a straightforward way to create LMPs with text-only outputs.
       - Supports multimodal inputs (text, images, etc.) in the prompt.
       - Automatically simplifies complex model responses to text-only format.

    2. Integration with Language Models:
       - Supports various language models through the 'model' parameter.
       - Allows customization of API parameters for fine-tuned control.

    3. Tracking and Monitoring:
       - Integrates with ell's tracking system for monitoring LMP versions over time, usage, and performance.

    Usage:
    The decorated function can return either a single prompt or a list of ell.Message objects:

        @ell.simple(model="gpt-4", temperature=0.7)
        def summarize_text(text: str) -> str:
            '''You are an expert at summarizing text.'''  # System prompt
            return f"Please summarize the following text:\n\n{text}"  # User prompt


        @ell.simple(model="gpt-4", temperature=0.7)
        def describe_image(image: PIL.Image.Image) -> List[ell.Message]:
            '''Describe the contents of an image.'''  # unused because we're returning a list of Messages
            return [
                # helper function for ell.Message(text="...", role="system")
                ell.system("You are an AI trained to describe images."),
                # helper function for ell.Message(content="...", role="user")
                ell.user([ContentBlock(text="Describe this image in detail."), ContentBlock(image=image)]),
            ]


        image_description = describe_image(PIL.Image.open("image.jpg"))
        print(image_description)
        # Output will be a string text-only description of the image

        summary = summarize_text("Long text to summarize...")
        print(summary)
        # Output will be a text-only summary

    Notes:
    - This decorator is designed for text-only model outputs, but supports multimodal inputs.
    - It simplifies complex responses from language models to text-only format, regardless of
      the model's capability for structured outputs, function calling, or multimodal outputs.
    - For preserving complex model outputs (e.g., structured data, function calls, or multimodal
      outputs), use the @ell.complex decorator instead. @ell.complex returns a Message object (role='assistant').
    - The decorated function can return a string or a list of ell.Message objects for more
      complex prompts, including multimodal inputs.
    - If called with n > 1 in api_params, the wrapped LMP will return a list of strings for the n parallel outputs
      of the model instead of just one string. Otherwise, it will return a single string.
    - You can pass LM API parameters either in the decorator or when calling the decorated function.
      Parameters passed during the function call will override those set in the decorator.

    Example of passing LM API params:

        @ell.simple(model="gpt-4", temperature=0.7)
        def generate_story(prompt: str) -> str:
            return f"Write a short story based on this prompt: {prompt}"

        # Using default parameters
        story1 = generate_story("A day in the life of a time traveler")

        # Overriding parameters during function call
        story2 = generate_story("An AI's first day of consciousness", lm_params={"temperature": 0.9, "max_tokens": 500})

    See Also:
    - ell.complex: For LMPs that preserve the full structure of model responses, including multimodal outputs.
    - ell.tool: For defining tools that can be used within complex LMPs.
    - ell.studio: For visualizing and analyzing LMP executions.
    """
    assert 'tools' not in api_params, "tools are not supported in lm decorator, use multimodal decorator instead"
    assert 'tool_choice' not in api_params, "tool_choice is not supported in lm decorator, use multimodal decorator instead"
    assert 'response_format' not in api_params, "response_format is not supported in lm decorator, use multimodal decorator instead"
@@ -1,6 +1,6 @@
from functools import wraps
import json
-from typing import Optional
+from typing import Any, Callable, Optional

from pydantic import Field, create_model
from pydantic.fields import FieldInfo
@@ -8,19 +8,127 @@ from ell.lmp._track import _track
# from ell.types import ToolFunction, InvocableTool, ToolParams
# from ell.util.verbosity import compute_color, tool_usage_logger_pre
from ell.configurator import config
-from ell._lstr import _lstr
-from ell.types.lmp import LMPType
+from ell.types._lstr import _lstr
+from ell.types.studio import LMPType
import inspect

-from ell.types.message import ContentBlock, ToolResult, coerce_content_list
+from ell.types.message import ContentBlock, InvocableTool, ToolResult, coerce_content_list


def tool(*, exempt_from_tracking: bool = False, **tool_kwargs):
    """
-   Defines a tool that can be used by language models.
    Defines a tool for use in language model programs (LMPs) that support tool use.

-   This is a decorator that can be applied to any ToolFunction type.
    This decorator wraps a function, adding metadata and handling for tool invocations.
    It automatically extracts the tool's description and parameters from the function's
    docstring and type annotations, creating a structured representation for LMs to use.

    Args:
        exempt_from_tracking (bool): If True, the tool usage won't be tracked. Default is False.
        **tool_kwargs: Additional keyword arguments for tool configuration.

    Returns:
        Callable: A wrapped version of the original function, usable as a tool by LMs.

    Requirements:
    - Function must have fully typed arguments (Pydantic-serializable).
    - Return value must be one of: str, JSON-serializable object, Pydantic model, or List[ContentBlock].
    - All parameters must have type annotations.
    - Complex types should be Pydantic models.
    - Function should have a descriptive docstring.
    - Can only be used in LMPs with the @ell.complex decorator.

    Functionality:
    1. Metadata Extraction:
       - Uses function docstring as tool description.
       - Extracts parameter info from type annotations and docstring.
       - Creates a Pydantic model for parameter validation and schema generation.

    2. Integration with LMs:
       - Can be passed to @ell.complex decorators.
       - Provides structured tool information to LMs.

    3. Invocation Handling:
       - Manages tracking, logging, and result processing.
       - Wraps results in appropriate types (e.g., _lstr) for tracking.

    Usage Modes:
    1. Normal Function Call:
       - Behaves like a regular Python function.
       - Example: result = my_tool(arg1="value", arg2=123)

    2. LMP Tool Call:
       - Used within LMPs or with explicit _tool_call_id.
       - Returns a ToolResult object.
       - Example: result = my_tool(arg1="value", arg2=123, _tool_call_id="unique_id")

    Result Coercion:
    - String → ContentBlock(text=result)
    - Pydantic BaseModel → ContentBlock(parsed=result)
    - List[ContentBlock] → Used as-is
    - Other types → ContentBlock(text=json.dumps(result))

    Example:
        @ell.tool()
        def create_claim_draft(
            claim_details: str,
            claim_type: str,
            claim_amount: float,
            claim_date: str = Field(description="Date format: YYYY-MM-DD")
        ) -> str:
            '''Create a claim draft. Returns the created claim ID.'''
            return "12345"

        For use in a complex LMP:
        @ell.complex(model="gpt-4", tools=[create_claim_draft], temperature=0.1)
        def insurance_chatbot(message_history: List[Message]) -> List[Message]:
            # Chatbot implementation...

        x = insurance_chatbot([
            ell.user("I crashed my car into a tree."),
            ell.assistant("I'm sorry to hear that. Can you provide more details?"),
            ell.user("The car is totaled and I need to file a claim. Happened on 2024-08-01. total value is like $5000")
        ])
        print(x)
        '''ell.Message(content=[
            ContentBlock(tool_call(
                tool_call_id="asdas4e",
                tool_fn=create_claim_draft,
                input=create_claim_draftParams({
                    claim_details="The car is totaled and I need to file a claim. Happened on 2024-08-01. total value is like $5000",
                    claim_type="car",
                    claim_amount=5000,
                    claim_date="2024-08-01"
                })
            ))
        ], role='assistant')'''

        if x.tool_calls:
            next_user_message = x.call_tools_and_collect_as_message()
            # This actually calls create_claim_draft
            print(next_user_message)
            '''
            ell.Message(content=[
                ContentBlock(tool_result=ToolResult(
                    tool_call_id="asdas4e",
                    result=[ContentBlock(text="12345")]
                ))
            ], role='user')
            '''
            y = insurance_chatbot(message_history + [x, next_user_message])
            print(y)
            '''
            ell.Message("I've filed that for you!", role='assistant')
            '''

    Note:
    - Tools are integrated into LMP calls via the 'tools' parameter in @ell.complex.
    - LMs receive structured tool information, enabling understanding and usage within the conversation context.
    """
-   def decorator(fn: "ToolFunction") -> "InvocableTool":
+   def tool_decorator(fn: Callable[..., Any]) -> InvocableTool:
        # color = compute_color(fn)
        _under_fn = fn
@@ -34,7 +142,6 @@ def tool(*, exempt_from_tracking: bool = False, **tool_kwargs):
        #XXX: Post release, we need to wrap all tool arguments in type primitives for tracking I guess or change that tool makes the tool function inoperable.
        #XXX: Most people are not going to manually try and call the tool without a type primitive and if they do it will most likely be wrapped with l strs.

-       # assert exempt_from_tracking or _invocation_origin is not None, "Invocation origin is required when using a tracked Tool"
        # Do nice logging hooks here.
@@ -123,8 +230,17 @@ def tool(*, exempt_from_tracking: bool = False, **tool_kwargs):
        # handle tracking last.
        if exempt_from_tracking:
-           return wrapper
+           ret = wrapper
        else:
-           return _track(wrapper)
+           ret = _track(wrapper)

-   return decorator
        # Helper function to get the Pydantic model for the tool
        def get_params_model():
            return wrapper.__ell_params_model__

        # Attach the helper function to the wrapper
-       wrapper.get_params_model = get_params_model
+       ret.get_params_model = get_params_model
        return ret

    return tool_decorator
@@ -1,2 +1,10 @@
"""
Attempts to register model names with their respective API client bindings. This allows for the creation of a unified interface for interacting with different LLM providers.

For example, to register an OpenAI model:
@ell.simple(model='gpt-4o-mini') -> @ell.simple(model='gpt-4o-mini', client=openai.OpenAI())

"""

import ell.models.openai
import ell.models.ollama
@@ -7,6 +7,21 @@ logger = logging.getLogger(__name__)
client = None

def register(base_url):
    """
    Registers Ollama models with the provided base URL.

    This function sets up the Ollama client with the given base URL and
    fetches available models from the Ollama API. It then registers these
    models with the global configuration, allowing them to be used within
    the ell framework.

    Args:
        base_url (str): The base URL of the Ollama API endpoint.

    Note:
        This function updates the global client and configuration.
        It logs any errors encountered during the process.
    """
    global client
    client = openai.Client(base_url=base_url)
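A hedged usage sketch for register above; the base URL assumes Ollama's OpenAI-compatible endpoint on its conventional local port, and the model name must actually exist on that server:

    import ell
    import ell.models.ollama

    ell.models.ollama.register(base_url="http://localhost:11434/v1")

    @ell.simple(model="llama3")  # illustrative; any model the server lists
    def haiku(topic: str) -> str:
        return f"Write a haiku about {topic}."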
@@ -1,3 +1,27 @@
"""
This module handles the registration of OpenAI models within the ell framework.

It provides functionality to register various OpenAI models with a given OpenAI client,
making them available for use throughout the system. The module also sets up a default
client behavior for unregistered models.

Key features:
1. Registration of specific OpenAI models with their respective types (system, openai, openai-internal).
2. Utilization of a default OpenAI client for any unregistered models.

The default client behavior ensures that even if a specific model is not explicitly
registered, the system can still attempt to use it with the default OpenAI client.
This fallback mechanism provides flexibility in model usage while maintaining a
structured approach to model registration.

Note: The actual model availability may depend on your OpenAI account's access and the
current offerings from OpenAI.

Additionally, due to the registration of default models, the OpenAI client may be used for
anthropic, cohere, groq, etc. models if their clients are not registered or fail
to register due to an error (lack of API keys, rate limits, etc.)
"""

from ell.configurator import config
import openai
@@ -6,7 +30,22 @@ import colorama
logger = logging.getLogger(__name__)

-def register_openai_models(client: openai.Client):
+def register(client: openai.Client):
    """
    Register OpenAI models with the provided client.

    This function takes an OpenAI client and registers various OpenAI models
    with the global configuration. It allows the system to use these models
    for different AI tasks.

    Args:
        client (openai.Client): An instance of the OpenAI client to be used
            for model registration.

    Note:
        The function doesn't return anything but updates the global
        configuration with the registered models.
    """
    model_data = [
        ('gpt-4-1106-preview', 'system'),
        ('gpt-4-32k-0314', 'openai'),
@@ -45,5 +84,5 @@ try:
except openai.OpenAIError as e:
    pass

-register_openai_models(default_client)
+register(default_client)
config._default_openai_client = default_client
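For completeness, a sketch of calling the renamed register with an explicit client, mirroring the module's own fallback path above (assumes OPENAI_API_KEY is set in the environment):

    import openai
    import ell.models.openai

    client = openai.Client()            # reads OPENAI_API_KEY from the environment
    ell.models.openai.register(client)  # registers the model list above against it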
@@ -2,7 +2,7 @@ from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from typing import Any, Optional, Dict, List, Set, Union
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
from ell.types import SerializedLMP, Invocation
from ell.types.message import InvocableLM
@@ -9,10 +9,10 @@ import cattrs
import numpy as np
from sqlalchemy.sql import text
from ell.types import InvocationTrace, SerializedLMP, Invocation, InvocationContents
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
from sqlalchemy import or_, func, and_, extract, FromClause
from sqlalchemy.types import TypeDecorator, VARCHAR
-from ell.types.lmp import SerializedLMPUses, utc_now
+from ell.types.studio import SerializedLMPUses, utc_now
from ell.util.serialization import pydantic_ltype_aware_cattr
import gzip
import json
@@ -1,3 +1,7 @@
"""
The primary types used in ell
"""

from ell.types.message import *
-from ell.types.lmp import *
+from ell.types.studio import *
from ell.types._lstr import *
@@ -1,6 +1,6 @@
# todo: implement tracing for structured outs. this is a v2 feature.
import json
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
from functools import cached_property
from PIL.Image import Image
import numpy as np
@@ -0,0 +1 @@
from .closure import lexically_closured_source
@@ -5,7 +5,7 @@ import json
from ell.configurator import config
import openai
from collections import defaultdict
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr
from ell.types import Message, ContentBlock, ToolCall
@@ -361,6 +361,47 @@ def get_referenced_names(code: str, module_name: str):
CLOSURE_SOURCE: Dict[str, str] = {}

def lexically_closured_source(func, forced_dependencies: Optional[Dict[str, Any]] = None):
    """
    Generate a lexically closured source for a given function.

    This function takes a callable object (function, method, or class) and generates
    a lexically closured source code. It captures all the dependencies, including
    global variables, free variables, and nested functions, to create a self-contained
    version of the function that can be executed independently.

    Args:
        func (Callable): The function or callable object to process.
        forced_dependencies (Optional[Dict[str, Any]]): A dictionary of additional
            dependencies to include in the closure. Keys are variable names, and
            values are the corresponding objects.

    Returns:
        Tuple[str, Set[Any]]: A tuple containing two elements:
            1. The lexically closured source code as a string.
            2. A set of function objects that this closure uses.

    Raises:
        ValueError: If the input is not a callable object.

    Example:
        def outer(x):
            y = 10
            def inner():
                return x + y
            return inner

        closured_source, uses = lexically_closured_source(outer)
        print(closured_source)
        # Output will include the source for both outer and inner functions,
        # along with any necessary imports and variable definitions.

    Note:
        This function relies on the `lexical_closure` function to perform the
        actual closure generation. It also uses the `__ell_closure__` attribute
        of the function, which is expected to be set by the `lexical_closure` function.
    """
    if not callable(func):
        raise ValueError("Input must be a callable object (function, method, or class).")
    _, fnclosure, uses = lexical_closure(func, initial_call=True, recursion_stack=[], forced_dependencies=forced_dependencies)
    return func.__ell_closure__, uses
@@ -8,7 +8,7 @@ import cattrs
import numpy as np
from pydantic import BaseModel
import PIL
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr

pydantic_ltype_aware_cattr = cattrs.Converter()
@@ -1,6 +1,6 @@
import numpy as np
import pytest
-from ell._lstr import _lstr
+from ell.types._lstr import _lstr


class TestLstr:
@@ -4,8 +4,8 @@ from sqlmodel import Session, select
from ell.stores.sql import SQLStore, SerializedLMP
from sqlalchemy import Engine, create_engine, func

-from ell.types.lmp import LMPType
-from ell.types.lmp import utc_now
+from ell.types.studio import LMPType
+from ell.types.studio import utc_now

@pytest.fixture
def in_memory_db():