Mirror of https://github.com/MadcowD/ell.git, synced 2024-09-22 16:14:36 +03:00
fix bug with dependencies?
@@ -1,3 +1,4 @@
from functools import wraps
import pytest
import math
from typing import Set, Any
@@ -9,6 +10,8 @@ from ell.util.closure import (
    get_referenced_names,
    is_function_called,
)
import ell


def test_lexical_closure_simple_function():
    def simple_func(x):
@@ -45,6 +48,7 @@ def test_lexical_closure_with_default_args():

    result, _, _ = lexical_closure(func_with_default)
    print(result)

    assert "def func_with_default(x=10):" in result

@pytest.mark.parametrize("value, expected", [
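For intuition on the assertion above: the first element of lexical_closure's return value behaves like the function's source text, so the closest stdlib analogue is inspect.getsource. A minimal sketch of that analogy, not ell's implementation:

    import inspect

    def func_with_default(x=10):
        return x

    # getsource returns the literal source text, including the default argument.
    assert "def func_with_default(x=10):" in inspect.getsource(func_with_default)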
@@ -68,8 +72,8 @@ def test_should_import():

def test_get_referenced_names():
    code = """
import math
result = math.sin(x) + math.cos(y)
"""
    referenced = get_referenced_names(code, "math")
    print(referenced)
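For readers unfamiliar with get_referenced_names, here is an independent sketch of the idea this test exercises, not ell's actual implementation: walk the AST and collect the attribute names accessed on a given module. The helper name referenced_names is hypothetical.

    import ast

    def referenced_names(code, module_name):
        # Collect attribute names accessed on `module_name`, e.g. math.sin -> "sin".
        names = set()
        for node in ast.walk(ast.parse(code)):
            if (isinstance(node, ast.Attribute)
                    and isinstance(node.value, ast.Name)
                    and node.value.id == module_name):
                names.add(node.attr)
        return names

    code = "import math\nresult = math.sin(x) + math.cos(y)\n"
    assert referenced_names(code, "math") == {"sin", "cos"}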
@@ -105,4 +109,31 @@ def test_lexical_closure_uses_type():

    _, _, uses = lexical_closure(dummy_func, initial_call=True)
    assert isinstance(uses, Set)
    # You might want to add a more specific check for the content of 'uses'


def test_lexical_closure_uses():

    @ell.lm(model="gpt-4")
    def dependency_func():
        return "42"


    @ell.lm(model="gpt-4")
    def main_func():
        return dependency_func()


    # Check that uses is a set
    assert isinstance(main_func.__ell_uses__, set)

    # Check that the set contains exactly one item
    assert dependency_func.__ell_hash__ in main_func.__ell_uses__
    assert len(main_func.__ell_uses__) == 1
    # Check that the item in the set starts with 'lmp-'
    assert list(main_func.__ell_uses__)[0].startswith('lmp-')
    assert len(dependency_func.__ell_uses__) == 0


if __name__ == "__main__":
    test_lexical_closure_uses()
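In short, the new test pins down the dependency-tracking contract the commit message alludes to. A distilled sketch (assuming, as the assertions above imply, that @ell.lm attaches an __ell_hash__ to each decorated function and records the hashes of captured dependencies in __ell_uses__):

    import ell

    @ell.lm(model="gpt-4")
    def helper():
        return "helper prompt"

    @ell.lm(model="gpt-4")
    def parent():
        return helper()

    # parent closes over helper, so it should track exactly that one dependency;
    # helper itself should track none.
    assert parent.__ell_uses__ == {helper.__ell_hash__}
    assert helper.__ell_uses__ == set()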
@@ -1,98 +1,97 @@
"""
Pytest for the LM function (mocks the OpenAI API so we can pretend to generate completions through the typical approach taken in the decorators and the adapters file).
"""
# """
# Pytest for the LM function (mocks the OpenAI API so we can pretend to generate completions through the typical approach taken in the decorators and the adapters file).
# """

import ell
from ell.decorators.lm import lm
import pytest
from unittest.mock import patch, MagicMock
from ell.types import Message, LMPParams
# import ell
# from ell.decorators.lm import lm
# import pytest
# from unittest.mock import patch
# from ell.types import Message, LMPParams


@lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
def lmp_with_default_system_prompt(*args, **kwargs):
    return "Test user prompt"

# @lm(model="gpt-4-turbo", temperature=0.1, max_tokens=5)
# def lmp_with_default_system_prompt(*args, **kwargs):
#     return "Test user prompt"


@lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
def lmp_with_docstring_system_prompt(*args, **kwargs):
    """Test system prompt"""  # I personally prefer this syntax, but it's not formattable, so I'm not sure it's the best approach. I think we can leave this in as a legacy feature, but the default docs should be using ell.system, ell.user, ...
# @lm(model="gpt-4-turbo", temperature=0.1, max_tokens=5)
# def lmp_with_docstring_system_prompt(*args, **kwargs):
#     """Test system prompt"""  # I personally prefer this syntax, but it's not formattable, so I'm not sure it's the best approach. I think we can leave this in as a legacy feature, but the default docs should be using ell.system, ell.user, ...

    return "Test user prompt"
#     return "Test user prompt"


@lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
def lmp_with_message_fmt(*args, **kwargs):
    """Just a normal docstring"""
# @lm(model="gpt-4-turbo", temperature=0.1, max_tokens=5)
# def lmp_with_message_fmt(*args, **kwargs):
#     """Just a normal docstring"""

    return [
        Message(role="system", content="Test system prompt from message fmt"),
        Message(role="user", content="Test user prompt 3"),
    ]
#     return [
#         Message(role="system", content="Test system prompt from message fmt"),
#         Message(role="user", content="Test user prompt 3"),
#     ]


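The comment on lmp_with_docstring_system_prompt above points at ell.system / ell.user as the preferred way to build messages. A hypothetical sketch of that style, assuming those constructors exist with these signatures (lmp_with_constructor_messages is not part of the diff):

    @lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
    def lmp_with_constructor_messages(*args, **kwargs):
        # Same messages as lmp_with_docstring_system_prompt, built explicitly.
        return [
            ell.system("Test system prompt"),
            ell.user("Test user prompt"),
        ]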
@pytest.fixture
def client_mock():
    with patch("ell.adapter.client.chat.completions.create") as mock:
        yield mock
# @pytest.fixture
# def mock_run_lm():
#     with patch("ell.util.lm._run_lm") as mock:
#         mock.return_value = ("Mocked content", None)
#         yield mock


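Aside: the MagicMock the tests below install mirrors the shape of an OpenAI-style chat-completion response. A standalone sketch of just that idea:

    from unittest.mock import MagicMock

    # The nested mock reproduces the attribute path read from the response:
    # response.choices[0].message.content.
    fake_response = MagicMock(
        choices=[MagicMock(message=MagicMock(content="Mocked content"))]
    )
    assert fake_response.choices[0].message.content == "Mocked content"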
def test_lm_decorator_with_params(client_mock):
    client_mock.return_value = MagicMock(
        choices=[MagicMock(message=MagicMock(content="Mocked content"))]
    )
    result = lmp_with_default_system_prompt("input", lm_params=dict(temperature=0.5))
    # Verify the arguments the mocked client was called with
    print("client_mock was called with:", client_mock.call_args)
    client_mock.assert_called_with(
        model="gpt-4-turbo",
        messages=[
            Message(role="system", content=ell.config.default_system_prompt),
            Message(role="user", content="Test user prompt"),
        ],
        temperature=0.5,
        max_tokens=5,
    )
    assert isinstance(result, str)
    assert result == "Mocked content"
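The assertion above implies a merge rule for sampling parameters: per-call lm_params override the decorator's defaults, while unspecified defaults pass through. A plain-dict sketch of that rule (an assumption about the semantics, not ell's code):

    # Decorator defaults vs. per-call overrides, as the assertion above implies.
    decorator_defaults = dict(temperature=0.1, max_tokens=5)
    call_params = dict(temperature=0.5)
    effective = {**decorator_defaults, **call_params}
    assert effective == dict(temperature=0.5, max_tokens=5)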
# def test_lm_decorator_with_params(mock_run_lm):
#     result = lmp_with_default_system_prompt("input", lm_params=dict(temperature=0.5))

#     mock_run_lm.assert_called_once_with(
#         model="gpt-4-turbo",
#         messages=[
#             Message(role="system", content=ell.config.default_system_prompt),
#             Message(role="user", content="Test user prompt"),
#         ],
#         lm_kwargs=dict(temperature=0.5, max_tokens=5),
#         _invocation_origin=None,
#         exempt_from_tracking=False,
#         client=None,
#         _logging_color=None,
#     )
#     assert result == "Mocked content"

# @patch("ell.util.lm._run_lm")
# def test_lm_decorator_with_docstring_system_prompt(mock_run_lm):
#     mock_run_lm.return_value = ("Mocked content", None)
#     result = lmp_with_docstring_system_prompt("input", lm_params=dict(temperature=0.5))

#     mock_run_lm.assert_called_once_with(
#         model="gpt-4-turbo",
#         messages=[
#             Message(role="system", content="Test system prompt"),
#             Message(role="user", content="Test user prompt"),
#         ],
#         lm_kwargs=dict(temperature=0.5, max_tokens=5),
#         _invocation_origin=None,
#         exempt_from_tracking=False,
#         client=None,
#         _logging_color=None,
#     )
#     assert result == "Mocked content"

def test_lm_decorator_with_docstring_system_prompt(client_mock):
    client_mock.return_value = MagicMock(
        choices=[MagicMock(message=MagicMock(content="Mocked content"))]
    )
    result = lmp_with_docstring_system_prompt("input", lm_params=dict(temperature=0.5))
    print("client_mock was called with:", client_mock.call_args)
    client_mock.assert_called_with(
        model="gpt-4-turbo",
        messages=[
            Message(role="system", content="Test system prompt"),
            Message(role="user", content="Test user prompt"),
        ],
        temperature=0.5,
        max_tokens=5,
    )
    assert isinstance(result, str)
    assert result == "Mocked content"
# @patch("ell.util.lm._run_lm")
# def test_lm_decorator_with_msg_fmt_system_prompt(mock_run_lm):
#     mock_run_lm.return_value = ("Mocked content from msg fmt", None)
#     result = lmp_with_message_fmt("input", lm_params=dict(temperature=0.5))

#     mock_run_lm.assert_called_once_with(
#         model="gpt-4-turbo",
#         messages=[
#             Message(role="system", content="Test system prompt from message fmt"),
#             Message(role="user", content="Test user prompt 3"),
#         ],
#         lm_kwargs=dict(temperature=0.5, max_tokens=5),
#         _invocation_origin=None,
#         exempt_from_tracking=False,
#         client=None,
#         _logging_color=None,
#     )
#     assert result == "Mocked content from msg fmt"

def test_lm_decorator_with_msg_fmt_system_prompt(client_mock):
    client_mock.return_value = MagicMock(
        choices=[
            MagicMock(message=MagicMock(content="Mocked content from msg fmt"))
        ]
    )
    result = lmp_with_message_fmt("input", lm_params=dict(temperature=0.5))
    print("client_mock was called with:", client_mock.call_args)
    client_mock.assert_called_with(
        model="gpt-4-turbo",
        messages=[
            Message(role="system", content="Test system prompt from message fmt"),
            Message(role="user", content="Test user prompt 3"),  # come on cursor.
        ],
        temperature=0.5,
        max_tokens=5,
    )
    assert isinstance(result, str)
    assert result == "Mocked content from msg fmt"
# Todo: Figure out mocking.