Merge branch 'main' of github.com:MadcowD/ell

This commit is contained in:
William Guss
2024-09-13 17:15:06 -07:00
5 changed files with 17 additions and 9 deletions

View File

@@ -51,6 +51,7 @@ Prompt engineering goes from a dark art to a science with the right tools. **Ell
<source srcset="https://docs.ell.so/_static/ell_studio_better.webp" type="image/webp">
<img src="docs/src/_static/ell_studio_better.webp" alt="ell studio demonstration">
</picture>
```bash
ell-studio --storage ./logdir
```
@@ -105,4 +106,4 @@ This will install both `ell` and `ell studio` on your system, allowing you to st
## Next Steps
Explore the [documentation](https://docs.ell.so/) to learn more about `ell` and its features. Follow the [Getting Started guide](https://docs.ell.so/getting_started.html) to create your first Language Model Program. Join our [Discord community](https://discord.gg/vWntgU52Xb) to connect with other users and get support.
Explore the [documentation](https://docs.ell.so/) to learn more about `ell` and its features. Follow the [Getting Started guide](https://docs.ell.so/getting_started.html) to create your first Language Model Program. Join our [Discord community](https://discord.gg/vWntgU52Xb) to connect with other users and get support.

View File

@@ -98,7 +98,7 @@ While the previous example used the docstring for the system message and the ret
greeting = hello("Sam Altman")
print(greeting)
This approach allows you to construct more complex conversations within your LMP. Importantly, you'll want to use this approach when you have a variable system prompt because python only allows you to have a static system prompt.
This approach allows you to construct more complex conversations within your LMP. Importantly, you'll want to use this approach when you have a variable system prompt because python only allows you to have a static docstring.
Prompting as Language Model Programming
----------------------------------------

View File

@@ -18,11 +18,16 @@ try:
def content_block_to_openai_format(content_block: ContentBlock) -> Dict[str, Any]:
if content_block.image:
base64_image = serialize_image(content_block.image)
image_url = {"url": base64_image}
# add detail only if supplied by user
# OpenAI's default is "auto", we omit the "detail" key entirely if not provided by user
if content_block.image_detail:
image_url["detail"] = content_block.image_detail
return {
"type": "image_url",
"image_url": {
"url": base64_image
}
"image_url": image_url
}
elif content_block.text:
return {

View File

@@ -78,11 +78,11 @@ def main():
# Start the database watcher
loop = asyncio.new_event_loop()
config = uvicorn.Config(app=app, port=args.port, loop=loop)
config = uvicorn.Config(app=app, host=args.host, port=args.port, loop=loop)
server = uvicorn.Server(config)
loop.create_task(server.serve())
loop.create_task(db_watcher(db_path, app))
loop.run_forever()
if __name__ == "__main__":
main()
main()

View File

@@ -48,6 +48,7 @@ class ContentBlock(BaseModel):
text: Optional[_lstr_generic] = Field(default=None)
image: Optional[Union[PILImage.Image, str, np.ndarray]] = Field(default=None)
image_detail: Optional[str] = Field(default=None)
audio: Optional[Union[np.ndarray, List[float]]] = Field(default=None)
tool_call: Optional[ToolCall] = Field(default=None)
parsed: Optional[BaseModel] = Field(default=None)
@@ -56,8 +57,9 @@ class ContentBlock(BaseModel):
@model_validator(mode='after')
def check_single_non_null(self):
non_null_fields = [field for field, value in self.__dict__.items() if value is not None]
if len(non_null_fields) > 1:
raise ValueError(f"Only one field can be non-null. Found: {', '.join(non_null_fields)}")
# need to allow for image_detail to be set with an image
if len(non_null_fields) > 1 and set(non_null_fields) != {'image', 'image_detail'}:
raise ValueError(f"Only one field can be non-null (except for image with image_detail). Found: {', '.join(non_null_fields)}")
return self
@property