mirror of https://github.com/MadcowD/ell.git, synced 2024-09-22 16:14:36 +03:00

Commit: fixing contrib
@@ -27,7 +27,7 @@ An example of how to utilize the serializer to save and load invocations from th
 import ell


-@ell.lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
+@ell.simple(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
 def some_lmp(*args, **kwargs):
     """Just a normal doc string"""
     return [
@@ -4,13 +4,13 @@
 # e.g.


-@ell.lm(model="gpt-4-turbo", temperature=0.1)
+@ell.simple(model="gpt-4-turbo", temperature=0.1)
 def blah():
     pass


 # Even then Azure is bullshit and doesn't use the same model names as oai so we can't really even have a registry. I guess we could do best effort and occasionally update the library when new models come out?
-@ell.lm(model="gpt-4-turbo", provider=AzureProvider, temperature=0.1)
+@ell.simple(model="gpt-4-turbo", provider=AzureProvider, temperature=0.1)
 def blah():
     pass
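To make the registry problem in that comment concrete: Azure deployments carry user-chosen names rather than canonical model names, so any mapping has to be supplied by the user. A minimal best-effort sketch (hypothetical helper names, not ell's API):

```python
# Hypothetical sketch, not ell's API: a best-effort registry mapping
# provider-specific deployment names back to canonical model names.
CANONICAL_MODELS = {"gpt-4-turbo", "gpt-4o", "gpt-4o-mini"}

# Azure deployment names are user-chosen, so this table must be user-supplied.
AZURE_DEPLOYMENT_ALIASES = {"my-gpt4-turbo": "gpt-4-turbo"}

def resolve_model(name: str) -> str:
    """Resolve a provider-specific name to a canonical model name, best effort."""
    if name in CANONICAL_MODELS:
        return name
    # Fall back to the user-supplied alias table; pass unknown names through
    # unchanged rather than failing hard when new models appear.
    return AZURE_DEPLOYMENT_ALIASES.get(name, name)
```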
@@ -7,7 +7,7 @@
 ```
 import ell

-@ell.lm
+@ell.simple
 def fn(): return "prompt"

 with ell.cache(fn):
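The `with ell.cache(fn):` block suggests scoped memoization of LMP calls. A minimal sketch of how such a scope could behave (a hypothetical `cached` helper, not ell's implementation, which would presumably also key on the prompt's version):

```python
from contextlib import contextmanager

# Hypothetical sketch, not ell's implementation: a context manager that
# yields a memoized view of an LMP so identical calls inside the `with`
# block hit the model only once. Assumes hashable arguments.
@contextmanager
def cached(lmp):
    memo = {}

    def cached_lmp(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key not in memo:
            memo[key] = lmp(*args, **kwargs)  # only cache misses call the model
        return memo[key]

    yield cached_lmp

# Usage: with cached(fn) as f: f(); f()  # second call reuses the first result
```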
@@ -23,7 +23,7 @@
 store = ell.stores.SQLiteStore("mystore")
 ell.use_store(store)

-@ell.lm
+@ell.simple
 def fn(): return "prompt"

 with ell.store.cache(lmp):
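Assembled end to end, the snippet implies a flow like the following; a sketch that assumes the API exactly as written in this diff (`SQLiteStore`, `use_store`, `store.cache`, with `fn` substituted for the hunk's dangling `lmp` name):

```python
import ell

# Sketch assuming the API shown in this diff: a SQLite-backed store records
# invocations, and the same store backs an explicit cache scope.
store = ell.stores.SQLiteStore("mystore")
ell.use_store(store)

@ell.simple(model="gpt-4-turbo")
def fn():
    return "prompt"

with ell.store.cache(fn):
    result = fn()  # invocation recorded in (and cacheable from) "mystore"
```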
@@ -5,7 +5,7 @@

 For example, let's consider the following language model program.
 ```python
-@ell.lm(model="gpt-4o")
+@ell.simple(model="gpt-4o")
 def my_language_model_program():
     """
     You are a good language model. You will respond nicely
@@ -40,7 +40,7 @@ def some_unused_function_in_parent_scope():
     # do something unrelated
     pass

-@ell.lm(model="gpt-4o")
+@ell.simple(model="gpt-4o")
 def prompt_dependent_on_a_constant(another_val : int):
     """You are an expert arithmetician."""
     return f"Compute {SOMEVAR} + {another_val}"
@@ -51,7 +51,7 @@ Our ideal serializer would just result in `serialize(prompt_dependent_on_a_const
 ```python
 SOMEVAR = 3

-@ell.lm(model="gpt-4o")
+@ell.simple(model="gpt-4o")
 def prompt_dependent_on_a_constant(another_val : int):
     """You are an expert arithmetician."""
     return f"Compute {SOMEVAR} + {another_val}"
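The behavior these hunks describe, serializing the function together with the free variables it reads so that `SOMEVAR = 3` is emitted above the source, can be approximated with the standard library. A minimal sketch (function left undecorated so it runs without ell; not ell's actual serializer):

```python
import inspect

SOMEVAR = 3

def prompt_dependent_on_a_constant(another_val: int):
    """You are an expert arithmetician."""
    return f"Compute {SOMEVAR} + {another_val}"

def serialize(fn) -> str:
    """Emit the globals a function actually reads, then its source."""
    cv = inspect.getclosurevars(fn)  # partitions referenced names by scope
    bindings = "\n".join(f"{k} = {v!r}" for k, v in cv.globals.items())
    return f"{bindings}\n\n{inspect.getsource(fn)}"

print(serialize(prompt_dependent_on_a_constant))  # prints SOMEVAR = 3, then the def
```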
@@ -17,7 +17,7 @@ Because prompts are model parameterizations of GPT, and the training process is
 import ell


-@ell.lm(model="gpt-4-turbo", temperature=0.1)
+@ell.simple(model="gpt-4-turbo", temperature=0.1)
 def write_a_story(about: str) -> str:
     """You are an expert story writer who sounds like a human. Write a story about the given topic."""
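Per the contrib prompt later in this diff, the docstring becomes the system prompt and the returned string the user prompt, so calling the decorated function runs the model. A usage sketch (the topic string is an arbitrary example):

```python
# Usage sketch: the docstring above is the system prompt; the string the
# function body returns (elided in this hunk) is the user prompt.
story = write_a_story("a lighthouse keeper")
print(story)  # the model's completion, returned as a plain string
```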
@@ -56,7 +56,7 @@ with chat_with_user(

 # You can define two lmps
-@ell.lm(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
+@ell.simple(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
 def compare_stories(a: str, b: str):
     """You are an expert writing critic [...]""" # <-- System prompt if using a chat type model.
@@ -67,7 +67,7 @@ def compare_stories(a: str, b: str):
 # Alternatively we can be granular about message specification


-@ell.lm(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
+@ell.simple(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
 def compare_stories(a: str, b: str):
     return [
         ell.Message(
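A filled-in version of the granular form. The `ell.Message(role=..., content=...)` constructor arguments are an assumption, since the hunk cuts off before them:

```python
# Sketch only: ell.Message(role=..., content=...) is assumed; the hunk above
# is truncated before the constructor arguments.
@ell.simple(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
def compare_stories(a: str, b: str):
    return [
        ell.Message(role="system", content="You are an expert writing critic [...]"),
        # The decorator's stop=["A", "B"] halts generation at the verdict letter.
        ell.Message(role="user", content=f"Story A:\n{a}\n\nStory B:\n{b}\n\nWhich story is better,"),
    ]
```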
src/ell/contrib/Untitled-1 (new empty file, 0 changes)
@@ -10,10 +10,10 @@ def write_commit_message_for_diff(old : str, new : str) -> str:
     You will be given two versions of source code.
     You will be expected to write a commit message that describes the changes between the two versions. Your commit message should be at most one sentence and highly specific to the changes made. Don't just discuss the functions changed but how they were specifically changed.
     Your commit message cannot be more than 10 words so use sentence fragments and be concise.
-    The @ell.lm decorator turns a function into a call to a language model:
+    The @ell.simple decorator turns a function into a call to a language model:
     the docstring is the system prompt and the string returned is the user prompt.
     It is extremely important that if these are changed, your commit message must say what specifically changed in the user or system prompt rather than saying they were updated or changed generically.
-    It is extremely important that you never refer to a @ell.lm docstring as a docstring; it is a system prompt.
+    It is extremely important that you never refer to a @ell.simple docstring as a docstring; it is a system prompt.
     Don't say why a change was done but what specifically changed.
     Consider all changes to the program including the globals and free variables.
     Respond in the following format:
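The mapping this prompt insists on (docstring becomes the system prompt, the returned string becomes the user prompt) looks roughly like this; an illustration of the stated contract, not ell's internals, and it assumes `fn` is the undecorated prompt function:

```python
import inspect

# Illustration, not ell internals: how an @ell.simple-style function maps
# onto chat messages per the prompt above. `fn` must be undecorated here,
# since a decorated function would call the model instead of returning text.
def lmp_to_messages(fn, *args, **kwargs) -> list:
    return [
        {"role": "system", "content": inspect.getdoc(fn)},  # the docstring
        {"role": "user", "content": fn(*args, **kwargs)},   # the returned string
    ]
```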
@@ -48,7 +48,7 @@ if __name__ == "__main__":
     test_version_1 = '''import ell
 import numpy as np

-@ell.lm(model="gpt-4o-mini")
+@ell.simple(model="gpt-4o-mini")
 def come_up_with_a_premise_for_a_joke_about(topic : str):
     """You are an incredibly funny comedian. Come up with a premise for a joke about topic"""
     return f"come up with a premise for a joke about {topic}"
@@ -56,7 +56,7 @@ def come_up_with_a_premise_for_a_joke_about(topic : str):
 def get_random_length():
     return int(np.random.beta(2, 5) * 300)

-@ell.lm(model="gpt-4o-mini")
+@ell.simple(model="gpt-4o-mini")
 def joke(topic : str):
     """You are a funny comedian. You respond in scripts for a standup comedy skit."""
     return f"Act out a full joke. Make your script {get_random_length()} words long. Here's the premise: {come_up_with_a_premise_for_a_joke_about(topic)}"'''
@@ -64,7 +64,7 @@ def joke(topic : str):
     test_version_2 = '''import ell
 import numpy as np

-@ell.lm(model="gpt-4o-mini")
+@ell.simple(model="gpt-4o-mini")
 def come_up_with_a_premise_for_a_joke_about(topic : str):
     """You are an incredibly funny comedian. Come up with a premise for a joke about topic"""
     return f"come up with a premise for a joke about {topic}"
@@ -72,7 +72,7 @@ def come_up_with_a_premise_for_a_joke_about(topic : str):
 def get_random_length():
     return int(np.random.beta(2, 5) * 300)

-@ell.lm(model="gpt-4o-mini")
+@ell.simple(model="gpt-4o-mini")
 def joke(topic : str):
     """You are a funny comedian. You respond in scripts for skits."""
     return f"Act out a full joke. Make your script {get_random_length()} words long. Here's the premise: {come_up_with_a_premise_for_a_joke_about(topic)}"'''
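The two test versions differ only in `joke`'s system prompt ("scripts for a standup comedy skit" vs. "scripts for skits"), which is exactly the kind of change the prompt demands be named specifically. A sketch of how the `__main__` block presumably exercises the LMP:

```python
# Sketch of the harness's intent: both versions are passed to the LMP defined
# above. A good output names the joke() system-prompt change specifically,
# something like "Shortened joke system prompt: standup skit wording removed".
message = write_commit_message_for_diff(test_version_1, test_version_2)
print(message)
```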