Mirror of https://github.com/MadcowD/ell.git (synced 2024-09-22 16:14:36 +03:00)
			
		
		
		
	initial commit
		
							
								
								
									
.gitignore (vendored, new file, 293 lines)
@@ -0,0 +1,293 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so
# macOS DS_Store files
.DS_Store

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
							
								
								
									
LICENSE (new file, 11 lines)
@@ -0,0 +1,11 @@
Copyright 2023 William Hebgen Guss

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
							
								
								
									
README.md (new file, 144 lines)
@@ -0,0 +1,144 @@
# What is ell?
An extremely tiny language model prompting library that treats prompts like weights to be serialized.

# Logging
```python
@ell.lm("this_is_my_model")
def hello(world : str):
    return f"Say hello to {world}"
```

We want this to log to the console when someone sets a logging flag.
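As a sketch of what such a flag could look like (the `config.verbose` name and the decorator internals below are assumptions for illustration, not ell's actual API), a module-level switch checked inside the decorator would be enough:

```python
# Hypothetical sketch: `config.verbose` is an assumed flag, not ell's real API.
import functools

class _Config:
    verbose: bool = False

config = _Config()

def lm(name):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            prompt = fn(*args, **kwargs)
            if config.verbose:
                print(f"[{name}] prompt: {prompt!r}")
            return prompt  # a real implementation would call the model here
        return wrapper
    return decorator
```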

# Optimizer
Prompts can be optimized using a variety of techniques; in particular, we can optimize them against various benchmarks using soft prompting or hard prompting.
```python
opt = ell.Optimizer(hyper_params)
# This only accounts for simple one-shot optimizations. What about minibatches, and control over what the optimizer sees?
# I suppose it really isn't that deep and we can abstract that away from the model context.
optimized_hello = opt.optimize(hello_world, eval_fn, dataset)

# Why should this be a state?
serializer = ell.Serializer()
ell.save(optimized_hello, "lol.ell")
# Need to define a type of callable fn that results from a model optimizer so that people can easily implement their own optimizers. This will come later of course.
```
->
Raw Python code plus any model serialization that sits on top of it, with the original function hash etc. It can be re-serialized in another context.

# Serialization
```python
"""
An example of how to utilize the serializer to save and load invocations from the model.
"""

import ell


@ell.lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
def some_lmp(*args, **kwargs):
    """Just a normal docstring"""
    return [
        ell.system("Test system prompt from message fmt"),
        ell.user("Test user prompt 3"),
    ]


# much cleaner.
if __name__ == "__main__":
    serializer = ell.Serializer("location")
    serializer.install()  # Any invocation hereafter will be saved.

    # Some open questions can we

```

The above is an example of why we'd want to have instances of serializers. We think of a serializer as storing all invocations and models as a program runs or evolves. The problem is that you need to install the serializer every time, and that doesn't feel so good.

For example, in version control we just save all outputs on a commit, but you have to remember the serialization location etc. instead of there being some global store. Which is optimal?

Alternatively, serialization happens by default in some global serializer directory? No, I hate this.

What's the middle ground? We detect a .ell directory near the file? No, that's unintuitive. This should behave like tensorboard (a sketch of the directory-detection idea follows below).
- [ ] Look at the tensorboard, pytorch, & wandb equivalents; no need to reinvent.

What if we install two different serializers?
People don't like specifying file locations; it is cumbersome.
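One possible middle ground, sketched below under loud assumptions (none of these names are ell's actual API): walk up from the current directory looking for a `.ell` directory, the way git finds `.git`, and fall back to an explicitly configured location.

```python
# Hypothetical sketch of .ell directory discovery; not ell's real behavior.
from pathlib import Path

def find_ell_dir(start: Path | None = None) -> Path | None:
    """Walk up from `start` (default: cwd) looking for an existing .ell directory."""
    current = (start or Path.cwd()).resolve()
    for parent in [current, *current.parents]:
        candidate = parent / ".ell"
        if candidate.is_dir():
            return candidate
    return None  # caller falls back to an explicitly configured location
```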


# Todos


## some notes from late at night

- [X] graph view
- [ ] some way of visualizing the timeline nicely
- [ ] comment system
- [ ] human evals immediately & easily.
- [X] highlight the fn being called & link it to other ell fns.
- [ ] keyboard shortcuts for navigating the invocations (expand with . to see a detailed view of the fn call)
- [ ] everything linkable
- [x] comparison mode for lms & double blind for evals.
- [ ] evaluations & metrics (ai as well.)
- [x] tie to git commit as well, or any versioning like this.
- [ ] feels like this should be a vscode plugin but idk, tensorboard is fine too.
- [ ] codebases will have lots of prompts, which need to be organized (perhaps by module or something)
- [ ] live updates & new indicators.

- [ ] chain-of-prompt visualization: per invocation is not the right workflow, but I can imagine a bunch of cards that go top to bottom with a line in between them (like a timeline), and then you can click on the line to see the invocation (or something like that). It would be live.
  * this reminds me that we are actually probably doing it wrong with @decorators, bc what is an operation? a call to a language model? there's a lot that can go into an invocation & you visualize that as a step-through, really, with frames as in programming
  * so this is probably all a bad idea.
- [ ] navigation should be as easy as vscode. cmd shift p or spotlight


another note:
don't need to do L funcs (wrappers); I can just have calls to the language model recorded and keep track of the chain on my custom string object :)



Some more notes:
- [ ] Developing should be by func so I can run & go look @ the results even if the hash changes
- [ ] Call stacks from LangSmith are nice, but also fingerprinting
- [ ] Dependencies take up a lot of space when someone is grokking a prompt, so should we hide them or just scroll down to the bottom where they are?
- [ ] Fix the weird rehashing issue of the main prompt whenever a subprompt changes? Or just make commits more of a background deal.
- [ ] Auto-document commit changes
- [ ] Think about the evaluator framework.
- [ ] Builtins for classifiers, like logit debiasing.
- [ ] We need checkpointing; we haven't tried using this in an ipython notebook yet, might be dogshit.


NEEDS TO FIX:
```

# this won't get the `uses` to trigger, because this calls a function that calls it; we need to recursively check all fns that are called to see if any of them calls it (see the AST sketch below).
def int_cast_comparator(story1, story2):
    return int(compare_two_stories(story1, story2)[0].split(":")[1].strip()) - 1


@ell.track
def get_really_good_story(about: str):
    stories = np.array(write_a_story(about, lm_params=dict(n=20)))

    def int_cast_comparator(story1, story2):
        return int(compare_two_stories(story1, story2)[0].split(":")[1].strip()) - 1

    while len(stories) > 1:
        ranking = tournament_ranking(stories, int_cast_comparator)
        print(ranking)
        # Eliminate low-ranking stories
        stories = stories[np.where(ranking > np.min(ranking))]

    return stories[0]
```
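
A rough sketch of the recursive check described in the comment above, with hypothetical names (the target, e.g. `compare_two_stories`, is assumed to be a tracked LMP): walk the function's AST, collect every name it calls, resolve those names in the function's globals, and recurse.

```python
# Hypothetical sketch: does `fn` (transitively) call anything in `targets`?
import ast
import inspect
import textwrap

def transitively_calls(fn, targets, seen=None):
    seen = seen or set()
    if fn in seen:
        return False
    seen.add(fn)
    tree = ast.parse(textwrap.dedent(inspect.getsource(fn)))
    called = {
        node.func.id
        for node in ast.walk(tree)
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name)
    }
    if called & targets:
        return True
    # Resolve called names in the function's globals and recurse into them.
    for name in called:
        callee = fn.__globals__.get(name)
        if inspect.isfunction(callee) and transitively_calls(callee, targets, seen):
            return True
    return False
```

Usage would look like `transitively_calls(int_cast_comparator, {"compare_two_stories"})`, which should return True through the nested call chain.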

# Reimplementation
- [ ] Serializers (saving prompts via serializing code)
    - [ ] Which format are we choosing to serialize in for the vcs
- [ ] Adapters (different api providers) (model proxy)
    - [ ] How to handle multimodal inputs and prompting
- [ ] Lstr origination (this is tracking various different lm strs through different prop interfaces)

- [x] Mock API mode for testing.

- [ ] Ell studio
							
								
								
									
docs/ramblings/notes_on_adapters.py (new file, 207 lines)
@@ -0,0 +1,207 @@
# How do we want to handle model adaptation?
# I don't really want to maintain some base registry and update it every damn time a new model comes out, but it's not clear how we specify providers otherwise.

# e.g.


@ell.lm(model="gpt-4-turbo", temperature=0.1)
def blah():
    pass


# Even then, Azure is bullshit and doesn't use the same model names as oai, so we can't really even have a registry. I guess we could do best effort and occasionally update the library when new models come out?
@ell.lm(model="gpt-4-turbo", provider=AzureProvider, temperature=0.1)
def blah():
    pass



# Do we make each provider implement several types of supported interfaces?

class Provider(abc.ABC):
    def __init__(self):
        pass


# I mean OAI has basically set the standard for how all providers interact.
class OAILikeProvider(abc.ABC):
    pass


# Also, do we care about tracking embeddings?
# no, not yet lol, but we clearly need a generic invocation stack.

# We can just focus on text output models for now and revisit later if necessary.

# Wow that was ass


# Am I really going to expect my users to pass around the same 'client' class to all the models? Also, since this is in the decorator, they'd have to define this client globally. I also want things to be mostly static; there's not really a reason to reinstantiate these providers, except for changing the 'client'.


# the only reason for the client is to enable switching between different model infra backends for oai lol, rather than hard coding ours.
# we could also just adopt oai's philosophy on this and use their clients as our provider class. but i hate the idea that i have to pass clients around all the time for different model classes.



# this is very much a user decision, and in fact you might even want to load balance (a la azure not implementing this..)
register('gpt-4-turbo', oai_client)
register('llama-70b-chat', groq_client)


# how to balance this with a low barrier to entry? env vars didn't work last time.

# some amount of initialization of the library needs to happen at the beginning.
# this is a requirement, in that while we could just default to oai models from oai and attempt to use the oai client on first invocation of an lmp
ell.init(
    oai_client=...,
)

# okay so by default we will use the standard provider of a model if a model is 'standard' from a provider, i.e. llama can get fucked but 4-turbo we'll go w/ oai.
# we will try to initialize using the env vars for any default provider, but essentially we need to provide a runtime interface for a user to select which provider they want to use for a class of models.
# i'm fine using my own fucking client..

# would there ever be a situation where the user wants to switch between clients for different lmps?
# rate limits etc.

# i really didn't want to make this my job but now it's my job.

# ew
"""ell.set_provider(
    models=ell.providers.OpenAI.models,
    provider=ell.providers.Azure
)"""

# or...
# we could go entirely functional

# fuck conflict resolution
# ell.register('gpt-4-turbo', OpenAI.chat)

# inherently you just don't want to fuck around with

# blah(lm_params=dict(client=my_openai_client))

# or even

with ell.use_client(my_openai_client):  # <-- well maybe actually i like this
    blah()

# or even
with_client(blah, openai)()


# it might be as simple as saying: "Just implement the oai standard and you're good to go.."

# should see how one actually invokes mistral etc.
# brb

"""
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

api_key = os.environ["MISTRAL_API_KEY"]
model = "mistral-large-latest"

client = MistralClient(api_key=api_key)

messages = [
    ChatMessage(role="user", content="What is the best French cheese?")
]

# No streaming
chat_response = client.chat(
    model=model,
    messages=messages,
)
"""

"""from openai import OpenAI

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who won the world series in 2020?"},
        {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": "Where was it played?"}
    ]
)
"""

# As much as I like the with_client framework, it doesn't actually relegate a certain model to a certain provider.

# We could get django about this shit.


from typing import List, Type

from ell.types import MessageOrDict

class ProviderMeta(type):
    def __init__(cls, name, bases, attrs):
        super().__init__(name, bases, attrs)
        if hasattr(cls, 'models'):
            cls.register_models(cls.models)

    @staticmethod
    def register_models(models):
        for model in models:
            print(f"Registering model: {model}")


class OAILikeProvider(Provider):
    models = [
        "gpt-4-turbo"
    ]

    @staticmethod
    def chat_completions(client, model, messages: List[MessageOrDict]):
        return client.chat.completions.create(
            model=model,
            messages=messages
        )

OAIProvider = OAILikeProvider
# Ah so this is weird: we actually might have providers with different model classes that we want to specify. For example, azure doesn't have default names for these models and they are in the user namespace... So literally when we want to use an azure provider we have to 'instantiate it'. That's fucking annoying lol.

# For example, we'd actually want the user to be able to easily switch to gpt-4-turbo without changing all their lmp code.
AzureProvider(
    model_map = {
        oai.GPT4Turbo: "ell-production-canada-west-gpt4-turbo"
    }
)

# and azure is so fucked that i'm pretty sure you need to specify different clients for different regions..
# :(

# just adopt the oai standard :\ please. this hurts.
# Like the model map is per client. So now we can't even disambiguate providers and model maps.


# Mistral had to be special, didn't it.
class MistralProvider(Provider):
    models = [
        "some trash model"
    ]

    def chat(self, client, model, messages):
        chat_response = client.chat(
            model=model,
            messages=messages,
        )
        return chat_response


# then we handle conflict resolution:
# "Two providers have registered this model, you must specify a provider."

# Basically we don't handle the client problem, but we do handle the code mismatch problem.

# So really it'll also be per model.
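
# A minimal sketch of the registry / conflict-resolution idea above. All names
# here are hypothetical, not ell's real API: providers register the models they
# can serve, and resolution fails loudly when two providers claim one model.
_model_registry = {}

def register_provider(provider_cls, models):
    for model in models:
        _model_registry.setdefault(model, []).append(provider_cls)

def resolve_provider(model, provider=None):
    if provider is not None:
        return provider  # an explicit choice always wins
    candidates = _model_registry.get(model, [])
    if not candidates:
        raise KeyError(f"No provider registered for model {model!r}.")
    if len(candidates) > 1:
        raise RuntimeError(
            f"Two providers have registered {model!r}; you must specify a provider."
        )
    return candidates[0]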
							
								
								
									
docs/ramblings/serialization.md (new file, 75 lines)
@@ -0,0 +1,75 @@
# Serializer Notes

## The context: What is a language model program?
We are going to build a serializer that serializes a set of prompts defined using our language model programming framework. In this case we don't think of prompts as just text that goes into a language model, but as programs that produce the messages sent to a language model API.

For example, let's consider the following language model program.
```python
@ell.lm(model="gpt-4o")
def my_language_model_program():
    """
    You are a good language model. You will respond nicely
    """
    return "Hello, world!"

print(my_language_model_program()) # hello back to you!
```

This language model program, when called, results in the following 'prompt' string content being sent to a language model.

```
# HTTP request to OpenAI chat completion endpoint
╔════════════════════════════════════════════════════════════════════════╗
║ system: You are a good language model. You will respond nicely         ║
║ user: Hello, world!                                                    ║
╚════════════════════════════════════════════════════════════════════════╝
# HTTP response
╔════════════════════════════════════════════════════════════════════════╗
║ assistant: hello back to you!                                          ║
╚════════════════════════════════════════════════════════════════════════╝
```

Now how might we keep track of a language model program and its outputs over time?

Consider serializing the prompt `prompt_dependent_on_a_constant` below
```python

SOMEVAR = 3

def some_unused_function_in_parent_scope():
    # do something unrelated
    pass

@ell.lm(model="gpt-4o")
def prompt_dependent_on_a_constant(another_val : int):
    """You are an expert arithmetician."""
    return f"Compute {SOMEVAR} + {another_val}"
```

Our ideal serializer would just have `serialize(prompt_dependent_on_a_constant)` return the minimal set of source required to reconstitute the program that led to the invocation of gpt-4o:

```python
SOMEVAR = 3

@ell.lm(model="gpt-4o")
def prompt_dependent_on_a_constant(another_val : int):
    """You are an expert arithmetician."""
    return f"Compute {SOMEVAR} + {another_val}"
```

This minimal expression of the 'prompt' is called its lexical closure.
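
As a rough illustration of how such a closure might be computed (a sketch only, not ell's actual algorithm; the decorated functions are assumed to be plain functions underneath), one can parse the function's source, find the global names it references, and emit their definitions alongside the function:

```python
# Sketch: a function's source plus the global names it references.
import ast
import hashlib
import inspect
import textwrap

def lexical_closure(fn) -> str:
    source = textwrap.dedent(inspect.getsource(fn))
    tree = ast.parse(source)
    names = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
    parts = []
    for name in sorted(names):
        value = fn.__globals__.get(name)
        if value is None or inspect.ismodule(value):
            continue
        if inspect.isfunction(value):
            # A real implementation would recurse and skip unused helpers.
            parts.append(textwrap.dedent(inspect.getsource(value)))
        elif isinstance(value, (int, float, str)):
            parts.append(f"{name} = {value!r}")
    return "\n".join(parts + [source])

def closure_hash(fn) -> str:
    return hashlib.sha256(lexical_closure(fn).encode()).hexdigest()[:16]
```

On the example above this would emit `SOMEVAR = 3` followed by the function's source, and `closure_hash` would give the version identifier the storage discussion below relies on.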

## How should we store these lexical closures?
Typically, as someone gets a language model to execute the way they want, they go through a process called prompt engineering. In this process they modify the prompt (here, the entire language model program: the lexical closure of the code that produces the messages sent to the language model) and test its quality by running certain calls to the language model, then checking the outputs to see if they reach a certain quality bar. For a developer or team of developers, this process typically happens in between traditional commits in a version control setting; you can think of it as graduate student descent instead of gradient descent. As we modify the code and test the outputs, we are tracing a path through the parameter space of the language model programs. So, drawing the analogy back to traditional machine learning model training and development, there are several modes of this type of serialization.

First, in order to effectively modify the prompts and find the one that best suits the executions you're looking for, you need to keep track of the version history of the individual prompt, i.e. the history of the language model program during this period between commits in a repository or production setting. You also need to keep track of the individual invocations of those programs: what did GPT-4 output when the language model program that produced the prompt looked a certain way?


In a startup, it seems like every single invocation of a language model is valuable: we can later retrain or distill the language model from them, so in some sense we want to keep these around. The way we store these invocations and the language model programs that produced them should therefore not be just a local thing. If I'm on a team with a lot of people, I want to think about this storage, and the code that produced it, in a group environment. In a standard environment this happens through versioning systems, branching, and so on, but it's not exactly clear how we would do this for prompts, because they are smaller things that already exist within a git versioning environment and are intra-commit. But in some sense we know it would be a nice prompt engineering environment for a single developer if you could somehow keep track of the trace of prompts, programs, and their related invocations over the course of prompt engineering.

Additionally, we care about serialization from a more production-oriented point of view. When we deploy some larger system that uses language model programs, like a web app or a gaming environment or some other large toolchain, that is typically where most of the volume happens, and we do care about those invocations. In this case we only need to serialize the program once, when its version changes, not every time an engineer modifies a little piece of code. We also want to persist the invocations in a really scalable way, so that servers or services can re-download them at a later time and try another model on them. Here we can imagine some larger invocation store which keeps track of all of our language model programs over time, as they are versioned in production, together with their related invocations. This is a very different storage environment from the local developer's: the local developer basically has a file system they can open up and look around in, or maybe a .ell directory that exists at the base of their repo; executing code automatically locates that directory and serializes the prompts there, they are versioned there, and that's fine, and then you just run the command `ell studio` at the root of the repo and it lets you look at the invocations in a really clean way. But in the production environment we don't want to use a local file store. We want to persist this in a database, and the database might be MySQL or whatever you want it to be; a certain schema will live there. So we need to build a serialization framework that enables both local development for prompt engineers and production-level storage of the language model programs and their related invocations. A schema sketch is given below.
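
To make the schema idea concrete, here is a rough sketch of the two tables such a store might need, using SQLite for illustration; the table and column names are assumptions for this sketch, not a committed design:

```python
# Hypothetical schema sketch for the invocation store (not ell's real schema).
import sqlite3

conn = sqlite3.connect("ell_store.db")  # e.g. inside the discovered .ell directory
conn.executescript("""
CREATE TABLE IF NOT EXISTS lmps (
    lmp_id      TEXT PRIMARY KEY,                      -- hash of the lexical closure
    name        TEXT NOT NULL,                         -- e.g. 'my_language_model_program'
    source      TEXT NOT NULL,                         -- the lexical closure itself
    created_at  TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS invocations (
    invocation_id  INTEGER PRIMARY KEY AUTOINCREMENT,
    lmp_id         TEXT NOT NULL REFERENCES lmps(lmp_id),
    args           TEXT,                               -- JSON-encoded inputs
    output         TEXT,                               -- model response
    created_at     TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
""")
```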

Now, in more of a brainstorming mode: my initial thought was that this really does feel like git, and you want to look at the diff between different language model programs as they evolve over time, but we don't really care about branching, and we need this to work both in the database and in the local file store. It's not going to live inside the typical git repository, because the git repository versions the entire project, and this is more like a file-system database of the intra-commit, in-between versions of the language model programs and their related invocations. One initial idea I had was that we could use one of those virtual, in-memory git repository libraries to make a separate git repository, the .ell repository at the top level, which uses literal git objects. But I don't actually know why we would want to do that in the first place, because you could just store the prompts in their raw format, as strings or compressed strings, and then the invocations in some sort of compressed invocation-parts file; then again, maybe the whole point of git is that it is a great object store for this kind of data, which doesn't change much and is highly structured. But there are a lot of ways to do this. We could do protobuf-level local storage for the serialization of prompts over time as we run them in that .ell directory, not thinking of it as git at all; in that case we would create diffs and a version history of the different language model programs in a somewhat farcical way, by literally looking at the individual objects in the protobuf storage, converting them to text, and running a diff library on them. Again, I am really open to a lot of ideas. We need to build a great local serialization and model-versioning framework that works for a developer who is modifying prompts as they go along. Maybe they switch branches entirely using git, and the prompt doesn't exist there, or it's reverted to an earlier state, so the versioning can get totally messed up; so either we are not really thinking about the existing git context, or we are integrating with it in a meaningful and clever way, where we can know which branch or development environment a language model program was developed inside of, despite it not being committed or added to the actual git object store.

Then also think about how we'd make this framework automatically serialize in the server context. As I'm developing with the ell library, serialization should automatically detect the .ell repository and keep track of things automatically, with the option for me to explicitly specify how we're going to serialize. In the production environment I will explicitly specify that we're serializing all of our prompts to a server backend, and all of the same functionality that exists for serialization in a local developer context will be wrapped into a nice production-level object store, whether that backend for invocations is S3 or a database like MongoDB; it doesn't really matter. Explain why you chose your approach thoroughly.
							
								
								
									
docs/ramblings/spec_notes.md (new file, 83 lines)
@@ -0,0 +1,83 @@
| """ | ||||
| This is the general spec we'd like to use for ell. | ||||
| Prompts should be functional:  | ||||
|  | ||||
| write_a_story() -> "Some story from GPT4 etc." | ||||
|  | ||||
| Prompts should be serializeable (either automatically or not) | ||||
|  | ||||
|  | ||||
| serializer.save(write_a_story, "write_a_story.bin")... | ||||
|  | ||||
|  | ||||
| Because prompts are model parameterizations of GPT, and the training process is that of prompt engineering; check pointing should be rigorous and we don't want to rley on git or another vcs to do this.. | ||||
|  | ||||
| """ | ||||
|  | ||||
| import ell | ||||
|  | ||||
|  | ||||
| @ell.lm(model="gpt-4-turbo", temperature=0.1) | ||||
| def write_a_story(about: str) -> str: | ||||
|     """You are an expert story writer who sounds like a human. Write a story about the given topic.""" | ||||
|  | ||||
|     return f"Write a story about {about}" | ||||
|  | ||||
|  | ||||
| # Should we really alow overriding the system prompt here? | ||||
| write_a_story("a cat and a dog", system_prompt) | ||||
|  | ||||
|  | ||||
| # I don't like this because the end-user has to recapitulate the chat prompt format if they are intereacting with the model repeatedly. I should be able to in some sense have a format where I don't need to track the history e.g. @ell.chatlm | ||||
|  | ||||
|  | ||||
| # we could force the chat components to take as input the history but this is an awkward interface imo. | ||||
| @ell.chat(model="gpt-4-turbo") | ||||
| def chat_with_user(*, history: ChatPrompt, user_message): ... | ||||
|  | ||||
|  | ||||
| with chat_with_user( | ||||
|     "some topical parameters, because primarily I'm defining the system prompt and the initial shit" | ||||
| ) as chat: | ||||
|     recent_msg = "" | ||||
|     while "[end]" not in recent_msg: | ||||
|         recent_msg = chat(input()) | ||||
|         print(f"Assistant: {recent_msg}") | ||||
|  | ||||
|  | ||||
| # What this would do is automatically provide the history from the user in terms of a "Chat prompt" so long as the first param of this user function is a history; or perhaps as lon as they specify as a kwarg this history. | ||||
|  | ||||
| # Okay but now I'm putting on the end user the requirement that they know how to put as a kwarg this history shit.. | ||||
|  | ||||
| # Chat what do you think of this bullshit. | ||||
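
# A rough sketch of how the decorator could own the history (hypothetical,
# not the final ell API): it returns a context manager holding the message
# list, so the caller never threads history themselves.
from contextlib import contextmanager

def chat(model, **lm_params):
    def decorator(fn):
        @contextmanager
        def session(*args, **kwargs):
            history = []  # messages accumulated across turns

            def send(user_message):
                history.append({"role": "user", "content": user_message})
                # A real implementation would build messages via fn and call
                # the model; here we just echo to keep the sketch runnable.
                reply = f"({model} reply to {user_message!r})"
                history.append({"role": "assistant", "content": reply})
                return reply

            yield send
        return session
    return decorator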


# Okay this is where we are:


# You can define two lmps
@ell.lm(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
def compare_stories(a: str, b: str):
    """You are an expert writing critic [...]"""  # <-- System prompt if using a chat-type model.

    return f"Which of the following stories is better (answer with A or B) followed by your reasoning.\n\nA: {a}\n\nB: {b}"


# This is a really compact way of specifying a comparison.
# Alternatively we can be granular about message specification:


@ell.lm(model="gpt-4-turbo", stop=["A", "B"], temperature=0.0)
def compare_stories(a: str, b: str):
    return [
        ell.Message(
            role="system", content="""You are an expert writing critic [...]"""
        ),
        ell.Message(
            role="user",
            content=f"Which of the following stories is better (answer with A or B) followed by your reasoning.\n\nA: {a}\n\nB: {b}",
        ),
    ]


# My computer is dying right now, what the fuck. Someone please buy me a new mac.
							
								
								
									
docs/todo.md (new file, 20 lines)
@@ -0,0 +1,20 @@
# Ell Todos

## Ell Studio

* Group LMPs by run: at a high level, we would like to be able to visualize all of the various prompts that existed during a specific run of a python process. There are some issues here around process-parallel invocations, or gunicorn-style web apps that are running language model programs; but there is some notion of "the serialized form of the source code when I ran these invocations", so that they all belong to the same run category, for identifying the latest language model programs and visualizing the graph thereof.
* An index for fast querying of the file store.
* Nice interface for viewing invocations themselves in a readable way.
* Visualizing the dependency graph of language model programs.
* Visualizing the "horizontal flow of invocations".
* Cost analysis of different language model chains.
* Automatic language model commit messages based on diffs.
* Automatic RLHF and comparison data collection.
* Metric functions and test tracking.
* A vscode plugin.
* If in a git repo and doing local development (not deployment), automatic creation of a filesystem serialization directory for the current root. ellconfig.yaml


## Serialization
* Originators work
* Build an index.
							
								
								
									
ell-studio/.gitignore (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# production
/build

# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*
							
								
								
									
ell-studio/README.md (new file, 70 lines)
@@ -0,0 +1,70 @@
# Getting Started with Create React App

This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).

## Available Scripts

In the project directory, you can run:

### `npm start`

Runs the app in the development mode.\
Open [http://localhost:3000](http://localhost:3000) to view it in your browser.

The page will reload when you make changes.\
You may also see any lint errors in the console.

### `npm test`

Launches the test runner in the interactive watch mode.\
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.

### `npm run build`

Builds the app for production to the `build` folder.\
It correctly bundles React in production mode and optimizes the build for the best performance.

The build is minified and the filenames include the hashes.\
Your app is ready to be deployed!

See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.

### `npm run eject`

**Note: this is a one-way operation. Once you `eject`, you can't go back!**

If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.

Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.

You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.

## Learn More

You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).

To learn React, check out the [React documentation](https://reactjs.org/).

### Code Splitting

This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)

### Analyzing the Bundle Size

This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)

### Making a Progressive Web App

This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)

### Advanced Configuration

This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)

### Deployment

This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)

### `npm run build` fails to minify

This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
							
								
								
									
ell-studio/package-lock.json (generated, new file, 19895 lines)
File diff suppressed because it is too large.
							
							
								
								
									
ell-studio/package.json (new file, 46 lines)
@@ -0,0 +1,46 @@
{
  "name": "ell-studio",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@heroicons/react": "^2.1.5",
    "@testing-library/jest-dom": "^5.17.0",
    "@testing-library/react": "^13.4.0",
    "@testing-library/user-event": "^13.5.0",
    "axios": "^1.6.0",
    "react": "^18.3.1",
    "react-dom": "^18.3.1",
    "react-router-dom": "^6.18.0",
    "react-scripts": "5.0.1",
    "web-vitals": "^2.1.4"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  },
  "devDependencies": {
    "autoprefixer": "^10.4.16",
    "postcss": "^8.4.31",
    "tailwindcss": "^3.3.5"
  }
}
							
								
								
									
										
ell-studio/public/favicon.ico (binary, new file, 3.8 KiB; not shown)
							
								
								
									
ell-studio/public/index.html (new file, 43 lines)
@@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <meta name="theme-color" content="#000000" />
    <meta
      name="description"
      content="Web site created using create-react-app"
    />
    <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
    <!--
      manifest.json provides metadata used when your web app is installed on a
      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
    -->
    <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
    <!--
      Notice the use of %PUBLIC_URL% in the tags above.
      It will be replaced with the URL of the `public` folder during the build.
      Only files inside the `public` folder can be referenced from the HTML.

      Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
      work correctly both with client-side routing and a non-root public URL.
      Learn how to configure a non-root public URL by running `npm run build`.
    -->
    <title>React App</title>
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
    <!--
      This HTML file is a template.
      If you open it directly in the browser, you will see an empty page.

      You can add webfonts, meta tags, or analytics to this file.
      The build step will place the bundled scripts into the <body> tag.

      To begin the development, run `npm start` or `yarn start`.
      To create a production bundle, use `npm run build` or `yarn build`.
    -->
  </body>
</html>
							
								
								
									
										
ell-studio/public/logo192.png (binary, new file, 5.2 KiB; not shown)
							
								
								
									
										
ell-studio/public/logo512.png (binary, new file, 9.4 KiB; not shown)
							
								
								
									
ell-studio/public/manifest.json (new file, 25 lines)
@@ -0,0 +1,25 @@
{
  "short_name": "React App",
  "name": "Create React App Sample",
  "icons": [
    {
      "src": "favicon.ico",
      "sizes": "64x64 32x32 24x24 16x16",
      "type": "image/x-icon"
    },
    {
      "src": "logo192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "logo512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}
ell-studio/public/robots.txt  (Normal file, 3 lines)
							| @@ -0,0 +1,3 @@ | ||||
| # https://www.robotstxt.org/robotstxt.html | ||||
| User-agent: * | ||||
| Disallow: | ||||
ell-studio/src/App.css  (Normal file, 38 lines)
							| @@ -0,0 +1,38 @@ | ||||
| .App { | ||||
|   text-align: center; | ||||
| } | ||||
|  | ||||
| .App-logo { | ||||
|   height: 40vmin; | ||||
|   pointer-events: none; | ||||
| } | ||||
|  | ||||
| @media (prefers-reduced-motion: no-preference) { | ||||
|   .App-logo { | ||||
|     animation: App-logo-spin infinite 20s linear; | ||||
|   } | ||||
| } | ||||
|  | ||||
| .App-header { | ||||
|   background-color: #282c34; | ||||
|   min-height: 100vh; | ||||
|   display: flex; | ||||
|   flex-direction: column; | ||||
|   align-items: center; | ||||
|   justify-content: center; | ||||
|   font-size: calc(10px + 2vmin); | ||||
|   color: white; | ||||
| } | ||||
|  | ||||
| .App-link { | ||||
|   color: #61dafb; | ||||
| } | ||||
|  | ||||
| @keyframes App-logo-spin { | ||||
|   from { | ||||
|     transform: rotate(0deg); | ||||
|   } | ||||
|   to { | ||||
|     transform: rotate(360deg); | ||||
|   } | ||||
| } | ||||
ell-studio/src/App.js  (Normal file, 30 lines)
							| @@ -0,0 +1,30 @@ | ||||
| import React from 'react'; | ||||
| import { BrowserRouter as Router, Route, Routes } from 'react-router-dom'; | ||||
| import Header from './components/Header'; | ||||
| import Sidebar from './components/Sidebar'; | ||||
| import LMPList from './components/LMPList'; | ||||
| import LMPDetails from './components/LMPDetails'; | ||||
| import { ThemeProvider } from './contexts/ThemeContext'; | ||||
|  | ||||
| function App() { | ||||
|   return ( | ||||
|     <ThemeProvider> | ||||
|       <Router> | ||||
|         <div className="flex min-h-screen bg-gray-900 text-gray-100"> | ||||
|           <Sidebar /> | ||||
|           <div className="flex-1"> | ||||
|             <Header /> | ||||
|             <main className="container mx-auto px-6 py-8"> | ||||
|               <Routes> | ||||
|                 <Route path="/" element={<LMPList />} /> | ||||
|                 <Route path="/lmp/:id" element={<LMPDetails />} /> | ||||
|               </Routes> | ||||
|             </main> | ||||
|           </div> | ||||
|         </div> | ||||
|       </Router> | ||||
|     </ThemeProvider> | ||||
|   ); | ||||
| } | ||||
|  | ||||
| export default App; | ||||
ell-studio/src/App.test.js  (Normal file, 8 lines)
							| @@ -0,0 +1,8 @@ | ||||
| import { render, screen } from '@testing-library/react'; | ||||
| import App from './App'; | ||||
|  | ||||
| test('renders the studio header', () => { | ||||
|   render(<App />); | ||||
|   const headerElement = screen.getByText(/LMP Visualization Studio/i); | ||||
|   expect(headerElement).toBeInTheDocument(); | ||||
| }); | ||||
ell-studio/src/components/Header.js  (Normal file, 16 lines)
							| @@ -0,0 +1,16 @@ | ||||
| import React from 'react'; | ||||
| import { useTheme } from '../contexts/ThemeContext'; | ||||
|  | ||||
| const Header = () => { | ||||
|   const { darkMode } = useTheme(); | ||||
|  | ||||
|   return ( | ||||
|     <header className={`bg-gray-800 shadow-lg ${darkMode ? 'text-white' : 'text-gray-900'}`}> | ||||
|       <div className="container mx-auto px-6 py-4"> | ||||
|         <h1 className="text-2xl font-bold">LMP Visualization Studio</h1> | ||||
|       </div> | ||||
|     </header> | ||||
|   ); | ||||
| }; | ||||
|  | ||||
| export default Header; | ||||
ell-studio/src/components/LMPDetails.js  (Normal file, 134 lines)
							| @@ -0,0 +1,134 @@ | ||||
| import React, { useState, useEffect } from 'react'; | ||||
| import { useParams } from 'react-router-dom'; | ||||
| import axios from 'axios'; | ||||
| import { useTheme } from '../contexts/ThemeContext'; | ||||
| import { ChevronDownIcon, ChevronUpIcon } from '@heroicons/react/24/solid'; | ||||
|  | ||||
| function LMPDetails() { | ||||
|   const { id } = useParams(); | ||||
|   const [lmp, setLmp] = useState(null); | ||||
|   const [versionHistory, setVersionHistory] = useState([]); | ||||
|   const [invocations, setInvocations] = useState([]); | ||||
|   const [expandedVersion, setExpandedVersion] = useState(null); | ||||
|   const [expandedInvocation, setExpandedInvocation] = useState(null); | ||||
|   const { darkMode } = useTheme(); | ||||
|  | ||||
|   useEffect(() => { | ||||
|     const fetchLMPDetails = async () => { | ||||
|       try { | ||||
|         const lmpResponse = await axios.get(`http://127.0.0.1:5000/api/lmps/${id}`); | ||||
|         setLmp(lmpResponse.data); | ||||
|  | ||||
|         const versionHistoryResponse = await axios.get(`http://127.0.0.1:5000/api/lmps/${id}/versions`); | ||||
|         setVersionHistory(versionHistoryResponse.data); | ||||
|  | ||||
|         const invocationsResponse = await axios.get(`http://127.0.0.1:5000/api/invocations/${id}`); | ||||
|         setInvocations(invocationsResponse.data); | ||||
|       } catch (error) { | ||||
|         console.error('Error fetching LMP details:', error); | ||||
|       } | ||||
|     }; | ||||
|     fetchLMPDetails(); | ||||
|   }, [id]); | ||||
|  | ||||
|   if (!lmp) return <div className={`flex items-center justify-center h-screen ${darkMode ? 'bg-gray-900 text-gray-100' : 'bg-gray-100 text-gray-800'}`}>Loading...</div>; | ||||
|  | ||||
|   const toggleVersionExpand = (index) => { | ||||
|     setExpandedVersion(expandedVersion === index ? null : index); | ||||
|   }; | ||||
|  | ||||
|   const toggleInvocationExpand = (index) => { | ||||
|     setExpandedInvocation(expandedInvocation === index ? null : index); | ||||
|   }; | ||||
|  | ||||
|   return ( | ||||
|     <div className={`min-h-screen ${darkMode ? 'bg-gray-900 text-gray-100' : 'bg-gray-100 text-gray-800'}`}> | ||||
|       <div className="container mx-auto px-4 py-8"> | ||||
|         <h1 className="text-3xl font-bold mb-6">{lmp.name}</h1> | ||||
|         <div className={`${darkMode ? 'bg-gray-800' : 'bg-white'} rounded-lg shadow-md p-6 mb-8`}> | ||||
|           <p className="text-sm mb-4">ID: {lmp.lmp_id}</p> | ||||
|           <h2 className="text-2xl font-semibold mb-4">Source Code</h2> | ||||
|           <pre className={`${darkMode ? 'bg-gray-700' : 'bg-gray-100'} p-4 rounded-md overflow-x-auto`}> | ||||
|             <code>{lmp.source}</code> | ||||
|           </pre> | ||||
|           <div className="mt-6 grid grid-cols-2 gap-4"> | ||||
|             <div> | ||||
|               <h3 className="text-xl font-semibold mb-2">Details</h3> | ||||
|               <p><strong>Created at:</strong> {new Date(lmp.created_at * 1000).toLocaleString()}</p> | ||||
|               <p><strong>Is LMP:</strong> {lmp.is_lmp ? 'Yes' : 'No'}</p> | ||||
|             </div> | ||||
|             {lmp.lm_kwargs && ( | ||||
|               <div> | ||||
|                 <h3 className="text-xl font-semibold mb-2">LM Keywords</h3> | ||||
|                 <pre className={`${darkMode ? 'bg-gray-700' : 'bg-gray-100'} p-4 rounded-md overflow-x-auto`}> | ||||
|                   <code>{JSON.stringify(JSON.parse(lmp.lm_kwargs), null, 2)}</code> | ||||
|                 </pre> | ||||
|               </div> | ||||
|             )} | ||||
|           </div> | ||||
|         </div> | ||||
|         <h2 className="text-2xl font-semibold mb-4">Version History</h2> | ||||
|         <div className="space-y-4"> | ||||
|           {versionHistory.map((version, index) => ( | ||||
|             <div key={index} className={`${darkMode ? 'bg-gray-800' : 'bg-white'} rounded-lg shadow-md overflow-hidden`}> | ||||
|               <div | ||||
|                 className={`p-4 cursor-pointer flex justify-between items-center ${expandedVersion === index ? (darkMode ? 'bg-gray-700' : 'bg-gray-200') : ''}`} | ||||
|                 onClick={() => toggleVersionExpand(index)} | ||||
|               > | ||||
|                 <h3 className="text-xl font-semibold">Version {versionHistory.length - index}</h3> | ||||
|                 {expandedVersion === index ? ( | ||||
|                   <ChevronUpIcon className="h-5 w-5" /> | ||||
|                 ) : ( | ||||
|                   <ChevronDownIcon className="h-5 w-5" /> | ||||
|                 )} | ||||
|               </div> | ||||
|               {expandedVersion === index && ( | ||||
|                 <div className="p-4 border-t border-gray-600"> | ||||
|                   <p><strong>LMP ID:</strong> {version.lmp_id}</p> | ||||
|                   <p><strong>Created at:</strong> {new Date(version.created_at * 1000).toLocaleString()}</p> | ||||
|                   <h4 className="font-semibold mt-2 mb-2">Source Code</h4> | ||||
|                   <pre className={`${darkMode ? 'bg-gray-700' : 'bg-gray-100'} p-4 rounded-md overflow-x-auto`}> | ||||
|                     <code>{version.source}</code> | ||||
|                   </pre> | ||||
|                 </div> | ||||
|               )} | ||||
|             </div> | ||||
|           ))} | ||||
|         </div> | ||||
|         <h2 className="text-2xl font-semibold mt-8 mb-4">Invocations</h2> | ||||
|         <div className="space-y-4"> | ||||
|           {invocations.map((invocation, index) => ( | ||||
|             <div key={index} className={`${darkMode ? 'bg-gray-800' : 'bg-white'} rounded-lg shadow-md overflow-hidden`}> | ||||
|               <div | ||||
|                 className={`p-4 cursor-pointer flex justify-between items-center ${expandedInvocation === index ? (darkMode ? 'bg-gray-700' : 'bg-gray-200') : ''}`} | ||||
|                 onClick={() => toggleInvocationExpand(index)} | ||||
|               > | ||||
|                 <h3 className="text-xl font-semibold">Invocation {invocations.length - index}</h3> | ||||
|                 {expandedInvocation === index ? ( | ||||
|                   <ChevronUpIcon className="h-5 w-5" /> | ||||
|                 ) : ( | ||||
|                   <ChevronDownIcon className="h-5 w-5" /> | ||||
|                 )} | ||||
|               </div> | ||||
|               {expandedInvocation === index && ( | ||||
|                 <div className="p-4 border-t border-gray-600"> | ||||
|                   <div className="grid grid-cols-2 gap-4"> | ||||
|                     <div> | ||||
|                       <p><strong>Args:</strong> {invocation.args}</p> | ||||
|                       <p><strong>Kwargs:</strong> {invocation.kwargs}</p> | ||||
|                       <p><strong>Result:</strong> {invocation.result}</p> | ||||
|                       <p><strong>Created at:</strong> {new Date(invocation.created_at * 1000).toLocaleString()}</p> | ||||
|                     </div> | ||||
|                     <div> | ||||
|                       <h4 className="font-semibold mb-2">Invocation Kwargs</h4> | ||||
|                       <pre className={`${darkMode ? 'bg-gray-700' : 'bg-gray-100'} p-4 rounded-md overflow-x-auto`}> | ||||
|                         <code>{JSON.stringify(invocation.invocation_kwargs, null, 2)}</code> | ||||
|                       </pre> | ||||
|                     </div> | ||||
|                   </div> | ||||
|                 </div> | ||||
|               )} | ||||
|             </div> | ||||
|           ))} | ||||
|         </div> | ||||
|       </div> | ||||
|     </div> | ||||
|   ); | ||||
| } | ||||
|  | ||||
| export default LMPDetails; | ||||
ell-studio/src/components/LMPList.js  (Normal file, 116 lines)
							| @@ -0,0 +1,116 @@ | ||||
| import React, { useState, useEffect } from 'react'; | ||||
| import { Link } from 'react-router-dom'; | ||||
| import axios from 'axios'; | ||||
| import { useTheme } from '../contexts/ThemeContext'; | ||||
|  | ||||
| function LMPList() { | ||||
|   const [lmps, setLmps] = useState([]); | ||||
|   const { darkMode } = useTheme(); | ||||
|   const [expandedLMP, setExpandedLMP] = useState(null); | ||||
|  | ||||
|   useEffect(() => { | ||||
|     const fetchLMPs = async () => { | ||||
|       try { | ||||
|         const response = await axios.get('http://127.0.0.1:5000/api/lmps'); | ||||
|         const aggregatedLMPs = aggregateLMPsByName(response.data); | ||||
|         setLmps(aggregatedLMPs); | ||||
|       } catch (error) { | ||||
|         console.error('Error fetching LMPs:', error); | ||||
|       } | ||||
|     }; | ||||
|     fetchLMPs(); | ||||
|   }, []); | ||||
|  | ||||
|   const aggregateLMPsByName = (lmpList) => { | ||||
|     const lmpMap = new Map(); | ||||
|     lmpList.forEach(lmp => { | ||||
|       if (!lmpMap.has(lmp.name)) { | ||||
|         lmpMap.set(lmp.name, { ...lmp, versions: [] }); | ||||
|       } | ||||
|       lmpMap.get(lmp.name).versions.push({ | ||||
|         lmp_id: lmp.lmp_id, | ||||
|         created_at: lmp.created_at, | ||||
|         invocations: lmp.invocations || 0 | ||||
|       }); | ||||
|     }); | ||||
|     return Array.from(lmpMap.values()).map(lmp => ({ | ||||
|       ...lmp, | ||||
|       versions: lmp.versions.sort((a, b) => b.created_at - a.created_at) | ||||
|     })); | ||||
|   }; | ||||
|  | ||||
|   const toggleExpand = (lmpName, event) => { | ||||
|     // Prevent toggling when clicking on the link | ||||
|     if (event.target.tagName.toLowerCase() !== 'a') { | ||||
|       setExpandedLMP(expandedLMP === lmpName ? null : lmpName); | ||||
|     } | ||||
|   }; | ||||
|  | ||||
|   const truncateId = (id) => { | ||||
|     return id.length > 8 ? `${id.substring(0, 8)}...` : id; | ||||
|   }; | ||||
|  | ||||
|   return ( | ||||
|     <div className={`${darkMode ? 'bg-gray-900' : 'bg-gray-100'} min-h-screen`}> | ||||
|       <div className="container mx-auto px-4 py-8"> | ||||
|         <h1 className={`text-3xl font-bold mb-6 ${darkMode ? 'text-gray-100' : 'text-gray-800'}`}>Language Model Programs</h1> | ||||
|         <div className="space-y-4"> | ||||
|           {lmps.map((lmp) => ( | ||||
|             <div  | ||||
|               key={lmp.name}  | ||||
|               className={`${darkMode ? 'bg-gray-800' : 'bg-white'} rounded-lg shadow-md p-6 cursor-pointer`} | ||||
|               onClick={(e) => toggleExpand(lmp.name, e)} | ||||
|             > | ||||
|               <div className="flex justify-between items-center mb-2"> | ||||
|                 <Link  | ||||
|                   to={`/lmp/${lmp.versions[0].lmp_id}`}  | ||||
|                   className={`text-xl font-semibold ${darkMode ? 'text-gray-100 hover:text-blue-300' : 'text-gray-800 hover:text-blue-600'}`} | ||||
|                   onClick={(e) => e.stopPropagation()} // Prevent card expansion when clicking the link | ||||
|                 > | ||||
|                   {lmp.name} | ||||
|                 </Link> | ||||
|                 <div className="flex space-x-2"> | ||||
|                   <span className={`text-xs px-2 py-1 rounded-full ${darkMode ? 'bg-gray-700 text-gray-300' : 'bg-gray-200 text-gray-700'}`}> | ||||
|                     ID: {truncateId(lmp.versions[0].lmp_id)} | ||||
|                   </span> | ||||
|                   <span className={`text-xs px-2 py-1 rounded-full ${darkMode ? 'bg-blue-600 text-white' : 'bg-blue-100 text-blue-800'}`}> | ||||
|                     {lmp.versions.length} Version{lmp.versions.length > 1 ? 's' : ''} | ||||
|                   </span> | ||||
|                 </div> | ||||
|               </div> | ||||
|               <div className={`${darkMode ? 'bg-gray-700' : 'bg-gray-100'} rounded p-3 mb-4`}> | ||||
|                 <code className={`text-sm ${darkMode ? 'text-gray-300' : 'text-gray-700'}`}> | ||||
|                   {lmp.source.length > 100 ? `${lmp.source.substring(0, 100)}...` : lmp.source} | ||||
|                 </code> | ||||
|               </div> | ||||
|               <div className="flex justify-between items-center"> | ||||
|                 <p className={`text-xs ${darkMode ? 'text-gray-400' : 'text-gray-500'}`}> | ||||
|                   Latest Version: {new Date(lmp.versions[0].created_at * 1000).toLocaleString()} | ||||
|                 </p> | ||||
|               </div> | ||||
|               {expandedLMP === lmp.name && lmp.versions.length > 1 && ( | ||||
|                 <div className={`mt-4 ${darkMode ? 'text-gray-300' : 'text-gray-600'}`}> | ||||
|                   <h3 className="text-sm font-semibold mb-2">Version History:</h3> | ||||
|                   <ul className="space-y-4"> | ||||
|                     {lmp.versions.map((version, index) => ( | ||||
|                       <li key={version.lmp_id} className="flex items-center"> | ||||
|                         <div className={`w-4 h-4 rounded-full ${darkMode ? 'bg-blue-500' : 'bg-blue-400'} mr-2`}></div> | ||||
|                         <div> | ||||
|                           <p className="text-xs font-semibold">Version {lmp.versions.length - index}</p> | ||||
|                           <p className="text-xs">{new Date(version.created_at * 1000).toLocaleString()}</p> | ||||
|                           <p className="text-xs">Invocations: {version.invocations}</p> | ||||
|                         </div> | ||||
|                       </li> | ||||
|                     ))} | ||||
|                   </ul> | ||||
|                 </div> | ||||
|               )} | ||||
|             </div> | ||||
|           ))} | ||||
|         </div> | ||||
|       </div> | ||||
|     </div> | ||||
|   ); | ||||
| } | ||||
|  | ||||
| export default LMPList; | ||||
ell-studio/src/components/LMPVersions.js  (Normal file, 44 lines)
							| @@ -0,0 +1,44 @@ | ||||
| import React from 'react'; | ||||
| import { Link } from 'react-router-dom'; | ||||
|  | ||||
| function formatTimeAgo(date) { | ||||
|   const seconds = Math.floor((new Date() - new Date(date)) / 1000); | ||||
|   const intervals = [ | ||||
|     { label: 'year', seconds: 31536000 }, | ||||
|     { label: 'month', seconds: 2592000 }, | ||||
|     { label: 'day', seconds: 86400 }, | ||||
|     { label: 'hour', seconds: 3600 }, | ||||
|     { label: 'minute', seconds: 60 }, | ||||
|     { label: 'second', seconds: 1 } | ||||
|   ]; | ||||
|  | ||||
|   for (let i = 0; i < intervals.length; i++) { | ||||
|     const interval = intervals[i]; | ||||
|     const count = Math.floor(seconds / interval.seconds); | ||||
|     if (count >= 1) { | ||||
|       return `${count} ${interval.label}${count > 1 ? 's' : ''} ago`; | ||||
|     } | ||||
|   } | ||||
|   return 'just now'; | ||||
| } | ||||
|  | ||||
| function LMPVersions({ versions }) { | ||||
|   return ( | ||||
|     <div className="mt-4 space-y-2"> | ||||
|       <h3 className="text-lg font-semibold">Version History</h3> | ||||
|       <ul className="space-y-2"> | ||||
|         {versions.map((version, index) => ( | ||||
|           <li key={version.lmp_id} className="flex items-center space-x-2"> | ||||
|             <span className="text-gray-500">{index === 0 ? 'Latest' : `v${versions.length - index}`}</span> | ||||
|             <Link to={`/lmp/${version.lmp_id}`} className="font-mono bg-gray-100 px-2 py-1 rounded text-sm"> | ||||
|               {version.lmp_id} | ||||
|             </Link> | ||||
|             <span className="text-gray-500 text-sm">{formatTimeAgo(version.created_at * 1000)}</span> | ||||
|           </li> | ||||
|         ))} | ||||
|       </ul> | ||||
|     </div> | ||||
|   ); | ||||
| } | ||||
|  | ||||
| export default LMPVersions; | ||||
ell-studio/src/components/Sidebar.js  (Normal file, 28 lines)
							| @@ -0,0 +1,28 @@ | ||||
| import React from 'react'; | ||||
| import { Link } from 'react-router-dom'; | ||||
| import { useTheme } from '../contexts/ThemeContext'; | ||||
|  | ||||
| const Sidebar = () => { | ||||
|   const { darkMode, toggleDarkMode } = useTheme(); | ||||
|  | ||||
|   return ( | ||||
|     <aside className="w-64 bg-gray-800 p-6"> | ||||
|       <nav className="space-y-4"> | ||||
|         <Link to="/" className="block text-gray-300 hover:text-white">Dashboard</Link> | ||||
|         <Link to="/models" className="block text-gray-300 hover:text-white">Models</Link> | ||||
|         <Link to="/datasets" className="block text-gray-300 hover:text-white">Datasets</Link> | ||||
|         <Link to="/visualizations" className="block text-gray-300 hover:text-white">Visualizations</Link> | ||||
|       </nav> | ||||
|       <div className="mt-8"> | ||||
|         <button | ||||
|           onClick={toggleDarkMode} | ||||
|           className="px-4 py-2 bg-gray-700 text-gray-300 rounded hover:bg-gray-600" | ||||
|         > | ||||
|           {darkMode ? 'Light Mode' : 'Dark Mode'} | ||||
|         </button> | ||||
|       </div> | ||||
|     </aside> | ||||
|   ); | ||||
| }; | ||||
|  | ||||
| export default Sidebar; | ||||
ell-studio/src/contexts/ThemeContext.js  (Normal file, 19 lines)
							| @@ -0,0 +1,19 @@ | ||||
| import React, { createContext, useState, useContext } from 'react'; | ||||
|  | ||||
| const ThemeContext = createContext(); | ||||
|  | ||||
| export const useTheme = () => useContext(ThemeContext); | ||||
|  | ||||
| export const ThemeProvider = ({ children }) => { | ||||
|   const [darkMode, setDarkMode] = useState(true); | ||||
|  | ||||
|   const toggleDarkMode = () => { | ||||
|     setDarkMode(prevMode => !prevMode); | ||||
|   }; | ||||
|  | ||||
|   return ( | ||||
|     <ThemeContext.Provider value={{ darkMode, toggleDarkMode }}> | ||||
|       {children} | ||||
|     </ThemeContext.Provider> | ||||
|   ); | ||||
| }; | ||||
ell-studio/src/index.css  (Normal file, 3 lines)
							| @@ -0,0 +1,3 @@ | ||||
| @tailwind base; | ||||
| @tailwind components; | ||||
| @tailwind utilities; | ||||
ell-studio/src/index.js  (Normal file, 17 lines)
							| @@ -0,0 +1,17 @@ | ||||
| import React from 'react'; | ||||
| import ReactDOM from 'react-dom/client'; | ||||
| import './index.css'; | ||||
| import App from './App'; | ||||
| import reportWebVitals from './reportWebVitals'; | ||||
|  | ||||
| const root = ReactDOM.createRoot(document.getElementById('root')); | ||||
| root.render( | ||||
|   <React.StrictMode> | ||||
|     <App /> | ||||
|   </React.StrictMode> | ||||
| ); | ||||
|  | ||||
| // If you want to start measuring performance in your app, pass a function | ||||
| // to log results (for example: reportWebVitals(console.log)) | ||||
| // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals | ||||
| reportWebVitals(); | ||||
ell-studio/src/logo.svg  (Normal file, 1 line)
							| @@ -0,0 +1 @@ | ||||
| <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 841.9 595.3"><g fill="#61DAFB"><path d="M666.3 296.5c0-32.5-40.7-63.3-103.1-82.4 14.4-63.6 8-114.2-20.2-130.4-6.5-3.8-14.1-5.6-22.4-5.6v22.3c4.6 0 8.3.9 11.4 2.6 13.6 7.8 19.5 37.5 14.9 75.7-1.1 9.4-2.9 19.3-5.1 29.4-19.6-4.8-41-8.5-63.5-10.9-13.5-18.5-27.5-35.3-41.6-50 32.6-30.3 63.2-46.9 84-46.9V78c-27.5 0-63.5 19.6-99.9 53.6-36.4-33.8-72.4-53.2-99.9-53.2v22.3c20.7 0 51.4 16.5 84 46.6-14 14.7-28 31.4-41.3 49.9-22.6 2.4-44 6.1-63.6 11-2.3-10-4-19.7-5.2-29-4.7-38.2 1.1-67.9 14.6-75.8 3-1.8 6.9-2.6 11.5-2.6V78.5c-8.4 0-16 1.8-22.6 5.6-28.1 16.2-34.4 66.7-19.9 130.1-62.2 19.2-102.7 49.9-102.7 82.3 0 32.5 40.7 63.3 103.1 82.4-14.4 63.6-8 114.2 20.2 130.4 6.5 3.8 14.1 5.6 22.5 5.6 27.5 0 63.5-19.6 99.9-53.6 36.4 33.8 72.4 53.2 99.9 53.2 8.4 0 16-1.8 22.6-5.6 28.1-16.2 34.4-66.7 19.9-130.1 62-19.1 102.5-49.9 102.5-82.3zm-130.2-66.7c-3.7 12.9-8.3 26.2-13.5 39.5-4.1-8-8.4-16-13.1-24-4.6-8-9.5-15.8-14.4-23.4 14.2 2.1 27.9 4.7 41 7.9zm-45.8 106.5c-7.8 13.5-15.8 26.3-24.1 38.2-14.9 1.3-30 2-45.2 2-15.1 0-30.2-.7-45-1.9-8.3-11.9-16.4-24.6-24.2-38-7.6-13.1-14.5-26.4-20.8-39.8 6.2-13.4 13.2-26.8 20.7-39.9 7.8-13.5 15.8-26.3 24.1-38.2 14.9-1.3 30-2 45.2-2 15.1 0 30.2.7 45 1.9 8.3 11.9 16.4 24.6 24.2 38 7.6 13.1 14.5 26.4 20.8 39.8-6.3 13.4-13.2 26.8-20.7 39.9zm32.3-13c5.4 13.4 10 26.8 13.8 39.8-13.1 3.2-26.9 5.9-41.2 8 4.9-7.7 9.8-15.6 14.4-23.7 4.6-8 8.9-16.1 13-24.1zM421.2 430c-9.3-9.6-18.6-20.3-27.8-32 9 .4 18.2.7 27.5.7 9.4 0 18.7-.2 27.8-.7-9 11.7-18.3 22.4-27.5 32zm-74.4-58.9c-14.2-2.1-27.9-4.7-41-7.9 3.7-12.9 8.3-26.2 13.5-39.5 4.1 8 8.4 16 13.1 24 4.7 8 9.5 15.8 14.4 23.4zM420.7 163c9.3 9.6 18.6 20.3 27.8 32-9-.4-18.2-.7-27.5-.7-9.4 0-18.7.2-27.8.7 9-11.7 18.3-22.4 27.5-32zm-74 58.9c-4.9 7.7-9.8 15.6-14.4 23.7-4.6 8-8.9 16-13 24-5.4-13.4-10-26.8-13.8-39.8 13.1-3.1 26.9-5.8 41.2-7.9zm-90.5 125.2c-35.4-15.1-58.3-34.9-58.3-50.6 0-15.7 22.9-35.6 58.3-50.6 8.6-3.7 18-7 27.7-10.1 5.7 19.6 13.2 40 22.5 60.9-9.2 20.8-16.6 41.1-22.2 60.6-9.9-3.1-19.3-6.5-28-10.2zM310 490c-13.6-7.8-19.5-37.5-14.9-75.7 1.1-9.4 2.9-19.3 5.1-29.4 19.6 4.8 41 8.5 63.5 10.9 13.5 18.5 27.5 35.3 41.6 50-32.6 30.3-63.2 46.9-84 46.9-4.5-.1-8.3-1-11.3-2.7zm237.2-76.2c4.7 38.2-1.1 67.9-14.6 75.8-3 1.8-6.9 2.6-11.5 2.6-20.7 0-51.4-16.5-84-46.6 14-14.7 28-31.4 41.3-49.9 22.6-2.4 44-6.1 63.6-11 2.3 10.1 4.1 19.8 5.2 29.1zm38.5-66.7c-8.6 3.7-18 7-27.7 10.1-5.7-19.6-13.2-40-22.5-60.9 9.2-20.8 16.6-41.1 22.2-60.6 9.9 3.1 19.3 6.5 28.1 10.2 35.4 15.1 58.3 34.9 58.3 50.6-.1 15.7-23 35.6-58.4 50.6zM320.8 78.4z"/><circle cx="420.9" cy="296.5" r="45.7"/><path d="M520.5 78.1z"/></g></svg> | ||||
| After Width: | Height: | Size: 2.6 KiB | 
ell-studio/src/reportWebVitals.js  (Normal file, 13 lines)
							| @@ -0,0 +1,13 @@ | ||||
| const reportWebVitals = onPerfEntry => { | ||||
|   if (onPerfEntry && onPerfEntry instanceof Function) { | ||||
|     import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { | ||||
|       getCLS(onPerfEntry); | ||||
|       getFID(onPerfEntry); | ||||
|       getFCP(onPerfEntry); | ||||
|       getLCP(onPerfEntry); | ||||
|       getTTFB(onPerfEntry); | ||||
|     }); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| export default reportWebVitals; | ||||
ell-studio/src/setupTests.js  (Normal file, 5 lines)
							| @@ -0,0 +1,5 @@ | ||||
| // jest-dom adds custom jest matchers for asserting on DOM nodes. | ||||
| // allows you to do things like: | ||||
| // expect(element).toHaveTextContent(/react/i) | ||||
| // learn more: https://github.com/testing-library/jest-dom | ||||
| import '@testing-library/jest-dom'; | ||||
ell-studio/tailwind.config.js  (Normal file, 9 lines)
							| @@ -0,0 +1,9 @@ | ||||
| module.exports = { | ||||
|   content: [ | ||||
|     "./src/**/*.{js,jsx,ts,tsx}", | ||||
|   ], | ||||
|   theme: { | ||||
|     extend: {}, | ||||
|   }, | ||||
|   plugins: [], | ||||
| } | ||||
ell/poetry.lock  (generated, Normal file, 631 lines)
							| @@ -0,0 +1,631 @@ | ||||
| # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. | ||||
|  | ||||
| [[package]] | ||||
| name = "annotated-types" | ||||
| version = "0.6.0" | ||||
| description = "Reusable constraint types to use with typing.Annotated" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, | ||||
|     {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "anyio" | ||||
| version = "4.3.0" | ||||
| description = "High level compatibility layer for multiple asynchronous event loop implementations" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, | ||||
|     {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| idna = ">=2.8" | ||||
| sniffio = ">=1.1" | ||||
|  | ||||
| [package.extras] | ||||
| doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] | ||||
| test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] | ||||
| trio = ["trio (>=0.23)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "blinker" | ||||
| version = "1.8.2" | ||||
| description = "Fast, simple object-to-object and broadcast signaling" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"}, | ||||
|     {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "certifi" | ||||
| version = "2024.2.2" | ||||
| description = "Python package for providing Mozilla's CA Bundle." | ||||
| optional = false | ||||
| python-versions = ">=3.6" | ||||
| files = [ | ||||
|     {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, | ||||
|     {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "click" | ||||
| version = "8.1.7" | ||||
| description = "Composable command line interface toolkit" | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, | ||||
|     {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| colorama = {version = "*", markers = "platform_system == \"Windows\""} | ||||
|  | ||||
| [[package]] | ||||
| name = "colorama" | ||||
| version = "0.4.6" | ||||
| description = "Cross-platform colored terminal text." | ||||
| optional = false | ||||
| python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" | ||||
| files = [ | ||||
|     {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, | ||||
|     {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "coloredlogs" | ||||
| version = "15.0.1" | ||||
| description = "Colored terminal output for Python's logging module" | ||||
| optional = false | ||||
| python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" | ||||
| files = [ | ||||
|     {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, | ||||
|     {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| humanfriendly = ">=9.1" | ||||
|  | ||||
| [package.extras] | ||||
| cron = ["capturer (>=2.4)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "distro" | ||||
| version = "1.9.0" | ||||
| description = "Distro - an OS platform information API" | ||||
| optional = false | ||||
| python-versions = ">=3.6" | ||||
| files = [ | ||||
|     {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, | ||||
|     {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "flask" | ||||
| version = "3.0.3" | ||||
| description = "A simple framework for building complex web applications." | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "flask-3.0.3-py3-none-any.whl", hash = "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3"}, | ||||
|     {file = "flask-3.0.3.tar.gz", hash = "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| blinker = ">=1.6.2" | ||||
| click = ">=8.1.3" | ||||
| itsdangerous = ">=2.1.2" | ||||
| Jinja2 = ">=3.1.2" | ||||
| Werkzeug = ">=3.0.0" | ||||
|  | ||||
| [package.extras] | ||||
| async = ["asgiref (>=3.2)"] | ||||
| dotenv = ["python-dotenv"] | ||||
|  | ||||
| [[package]] | ||||
| name = "flask-cors" | ||||
| version = "4.0.1" | ||||
| description = "A Flask extension adding a decorator for CORS support" | ||||
| optional = false | ||||
| python-versions = "*" | ||||
| files = [ | ||||
|     {file = "Flask_Cors-4.0.1-py2.py3-none-any.whl", hash = "sha256:f2a704e4458665580c074b714c4627dd5a306b333deb9074d0b1794dfa2fb677"}, | ||||
|     {file = "flask_cors-4.0.1.tar.gz", hash = "sha256:eeb69b342142fdbf4766ad99357a7f3876a2ceb77689dc10ff912aac06c389e4"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| Flask = ">=0.9" | ||||
|  | ||||
| [[package]] | ||||
| name = "h11" | ||||
| version = "0.14.0" | ||||
| description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, | ||||
|     {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "httpcore" | ||||
| version = "1.0.5" | ||||
| description = "A minimal low-level HTTP client." | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, | ||||
|     {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| certifi = "*" | ||||
| h11 = ">=0.13,<0.15" | ||||
|  | ||||
| [package.extras] | ||||
| asyncio = ["anyio (>=4.0,<5.0)"] | ||||
| http2 = ["h2 (>=3,<5)"] | ||||
| socks = ["socksio (==1.*)"] | ||||
| trio = ["trio (>=0.22.0,<0.26.0)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "httpx" | ||||
| version = "0.27.0" | ||||
| description = "The next generation HTTP client." | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, | ||||
|     {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| anyio = "*" | ||||
| certifi = "*" | ||||
| httpcore = "==1.*" | ||||
| idna = "*" | ||||
| sniffio = "*" | ||||
|  | ||||
| [package.extras] | ||||
| brotli = ["brotli", "brotlicffi"] | ||||
| cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] | ||||
| http2 = ["h2 (>=3,<5)"] | ||||
| socks = ["socksio (==1.*)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "humanfriendly" | ||||
| version = "10.0" | ||||
| description = "Human friendly output for text interfaces using Python" | ||||
| optional = false | ||||
| python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" | ||||
| files = [ | ||||
|     {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, | ||||
|     {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} | ||||
|  | ||||
| [[package]] | ||||
| name = "idna" | ||||
| version = "3.7" | ||||
| description = "Internationalized Domain Names in Applications (IDNA)" | ||||
| optional = false | ||||
| python-versions = ">=3.5" | ||||
| files = [ | ||||
|     {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, | ||||
|     {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "iniconfig" | ||||
| version = "2.0.0" | ||||
| description = "brain-dead simple config-ini parsing" | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, | ||||
|     {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "itsdangerous" | ||||
| version = "2.2.0" | ||||
| description = "Safely pass data to untrusted environments and back." | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, | ||||
|     {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "jinja2" | ||||
| version = "3.1.4" | ||||
| description = "A very fast and expressive template engine." | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, | ||||
|     {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| MarkupSafe = ">=2.0" | ||||
|  | ||||
| [package.extras] | ||||
| i18n = ["Babel (>=2.7)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "markupsafe" | ||||
| version = "2.1.5" | ||||
| description = "Safely add untrusted strings to HTML/XML markup." | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, | ||||
|     {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, | ||||
|     {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "numpy" | ||||
| version = "1.26.4" | ||||
| description = "Fundamental package for array computing in Python" | ||||
| optional = false | ||||
| python-versions = ">=3.9" | ||||
| files = [ | ||||
|     {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, | ||||
|     {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, | ||||
|     {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, | ||||
|     {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, | ||||
|     {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, | ||||
|     {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, | ||||
|     {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, | ||||
|     {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, | ||||
|     {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "openai" | ||||
| version = "1.28.0" | ||||
| description = "The official Python library for the openai API" | ||||
| optional = false | ||||
| python-versions = ">=3.7.1" | ||||
| files = [ | ||||
|     {file = "openai-1.28.0-py3-none-any.whl", hash = "sha256:94b5a99f5121e1747dda1bb8fff31820d5ab4b49056a9cf2e3605f5c90011955"}, | ||||
|     {file = "openai-1.28.0.tar.gz", hash = "sha256:ac43b8b48aec70de4b76cfc96ae906bf8d5814427475b9dabb662f84f655f0e1"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| anyio = ">=3.5.0,<5" | ||||
| distro = ">=1.7.0,<2" | ||||
| httpx = ">=0.23.0,<1" | ||||
| pydantic = ">=1.9.0,<3" | ||||
| sniffio = "*" | ||||
| tqdm = ">4" | ||||
| typing-extensions = ">=4.7,<5" | ||||
|  | ||||
| [package.extras] | ||||
| datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "packaging" | ||||
| version = "24.0" | ||||
| description = "Core utilities for Python packages" | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, | ||||
|     {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "pluggy" | ||||
| version = "1.5.0" | ||||
| description = "plugin and hook calling mechanisms for python" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, | ||||
|     {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, | ||||
| ] | ||||
|  | ||||
| [package.extras] | ||||
| dev = ["pre-commit", "tox"] | ||||
| testing = ["pytest", "pytest-benchmark"] | ||||
|  | ||||
| [[package]] | ||||
| name = "pydantic" | ||||
| version = "2.7.1" | ||||
| description = "Data validation using Python type hints" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, | ||||
|     {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| annotated-types = ">=0.4.0" | ||||
| pydantic-core = "2.18.2" | ||||
| typing-extensions = ">=4.6.1" | ||||
|  | ||||
| [package.extras] | ||||
| email = ["email-validator (>=2.0.0)"] | ||||
|  | ||||
| [[package]] | ||||
| name = "pydantic-core" | ||||
| version = "2.18.2" | ||||
| description = "Core functionality for Pydantic validation and serialization" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, | ||||
|     {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, | ||||
|     {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, | ||||
|     {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, | ||||
|     {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, | ||||
|     {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, | ||||
|     {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, | ||||
|     {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, | ||||
|     {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" | ||||
|  | ||||
| [[package]] | ||||
| name = "pyreadline3" | ||||
| version = "3.4.1" | ||||
| description = "A python implementation of GNU readline." | ||||
| optional = false | ||||
| python-versions = "*" | ||||
| files = [ | ||||
|     {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, | ||||
|     {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "pytest" | ||||
| version = "8.2.0" | ||||
| description = "pytest: simple powerful testing with Python" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, | ||||
|     {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| colorama = {version = "*", markers = "sys_platform == \"win32\""} | ||||
| iniconfig = "*" | ||||
| packaging = "*" | ||||
| pluggy = ">=1.5,<2.0" | ||||
|  | ||||
| [package.extras] | ||||
| dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] | ||||
|  | ||||
| [[package]] | ||||
| name = "sniffio" | ||||
| version = "1.3.1" | ||||
| description = "Sniff out which async library your code is running under" | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, | ||||
|     {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "tqdm" | ||||
| version = "4.66.4" | ||||
| description = "Fast, Extensible Progress Meter" | ||||
| optional = false | ||||
| python-versions = ">=3.7" | ||||
| files = [ | ||||
|     {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, | ||||
|     {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| colorama = {version = "*", markers = "platform_system == \"Windows\""} | ||||
|  | ||||
| [package.extras] | ||||
| dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] | ||||
| notebook = ["ipywidgets (>=6)"] | ||||
| slack = ["slack-sdk"] | ||||
| telegram = ["requests"] | ||||
|  | ||||
| [[package]] | ||||
| name = "typing-extensions" | ||||
| version = "4.11.0" | ||||
| description = "Backported and Experimental Type Hints for Python 3.8+" | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, | ||||
|     {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, | ||||
| ] | ||||
|  | ||||
| [[package]] | ||||
| name = "werkzeug" | ||||
| version = "3.0.3" | ||||
| description = "The comprehensive WSGI web application library." | ||||
| optional = false | ||||
| python-versions = ">=3.8" | ||||
| files = [ | ||||
|     {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, | ||||
|     {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, | ||||
| ] | ||||
|  | ||||
| [package.dependencies] | ||||
| MarkupSafe = ">=2.1.1" | ||||
|  | ||||
| [package.extras] | ||||
| watchdog = ["watchdog (>=2.3)"] | ||||
|  | ||||
| [metadata] | ||||
| lock-version = "2.0" | ||||
| python-versions = "^3.12" | ||||
| content-hash = "b899d8f6658948d09567475bba732804cd1cdbbe5ee46a57c15ac8d7d7654a23" | ||||
							
								
								
									
28  ell/pyproject.toml  Normal file
| @@ -0,0 +1,28 @@ | ||||
| [tool.poetry] | ||||
| name = "ell" | ||||
| version = "0.0.1" | ||||
| description = "ℓ is an language model programming library." | ||||
| authors = ["William Guss <carlostnb@gmail.com>"] | ||||
| license = "Modified BSD License" | ||||
|  | ||||
| packages = [ | ||||
|     { include = "ell", from = "src" } | ||||
| ] | ||||
|  | ||||
| [tool.poetry.dependencies] | ||||
| python = "^3.12" | ||||
| numpy = "^1.26.4" | ||||
| openai = "^1.28.0" | ||||
| coloredlogs = "^15.0.1" | ||||
| colorama = "^0.4.6" | ||||
| flask = "^3.0.3" | ||||
| flask-cors = "^4.0.1" | ||||
|  | ||||
|  | ||||
|  | ||||
| [tool.poetry.dev-dependencies] | ||||
| pytest = "^8.2.0" | ||||
|  | ||||
| [build-system] | ||||
| requires = ["poetry-core>=1.0.0"] | ||||
| build-backend = "poetry.core.masonry.api" | ||||
							
								
								
									
48  ell/src/ell/__init__.py  Normal file
| @@ -0,0 +1,48 @@ | ||||
| from ell.decorators import lm | ||||
| from ell.types import Message | ||||
|  | ||||
| # registers all of the models. | ||||
| import ell.models | ||||
| from ell.configurator import config | ||||
|  | ||||
|  | ||||
| def system(content: str) -> Message: | ||||
|     """ | ||||
|     Create a system message with the given content. | ||||
|  | ||||
|     Args: | ||||
|     content (str): The content of the system message. | ||||
|  | ||||
|     Returns: | ||||
|     Message: A Message object with role set to 'system' and the provided content. | ||||
|     """ | ||||
|     return Message(role="system", content=content) | ||||
|  | ||||
|  | ||||
| def user(content: str) -> Message: | ||||
|     """ | ||||
|     Create a user message with the given content. | ||||
|  | ||||
|     Args: | ||||
|     content (str): The content of the user message. | ||||
|  | ||||
|     Returns: | ||||
|     Message: A Message object with role set to 'user' and the provided content. | ||||
|     """ | ||||
|     return Message(role="user", content=content) | ||||
|  | ||||
|  | ||||
| def assistant(content: str) -> Message: | ||||
|     """ | ||||
|     Create an assistant message with the given content. | ||||
|  | ||||
|     Args: | ||||
|     content (str): The content of the assistant message. | ||||
|  | ||||
|     Returns: | ||||
|     Message: A Message object with role set to 'assistant' and the provided content. | ||||
|     """ | ||||
|     return Message(role="assistant", content=content) | ||||
|  | ||||
|  | ||||
| __all__ = ["lm", "system", "user", "assistant", "config"] | ||||
							
								
								
									
64  ell/src/ell/configurator.py  Normal file
| @@ -0,0 +1,64 @@ | ||||
| from typing import Dict, List, Optional, Union, Set | ||||
| from dataclasses import dataclass, field | ||||
| import openai | ||||
| import logging | ||||
| from contextlib import contextmanager | ||||
| import threading | ||||
|  | ||||
| logger = logging.getLogger(__name__) | ||||
|  | ||||
| @dataclass | ||||
| class Config: | ||||
|     model_registry: Dict[str, openai.Client] = field(default_factory=dict) | ||||
|     verbose: bool = False | ||||
|     wrapped_logging: bool = True | ||||
|     override_wrapped_logging_width: Optional[int] = None | ||||
|     serializers: Set["Serializer"] = field(default_factory=set) | ||||
|  | ||||
|  | ||||
|     def __post_init__(self): | ||||
|         self._lock = threading.Lock() | ||||
|         self._local = threading.local() | ||||
|  | ||||
|     def register_model(self, model_name: str, client: openai.Client) -> None: | ||||
|         with self._lock: | ||||
|             self.model_registry[model_name] = client | ||||
|  | ||||
|     @property  | ||||
|     def has_serializers(self) -> bool: | ||||
|         return len(self.serializers) > 0 | ||||
|  | ||||
|     @contextmanager | ||||
|     def model_registry_override(self, overrides: Dict[str, openai.Client]): | ||||
|         if not hasattr(self._local, 'stack'): | ||||
|             self._local.stack = [] | ||||
|          | ||||
|         with self._lock: | ||||
|             current_registry = self._local.stack[-1] if self._local.stack else self.model_registry | ||||
|             new_registry = current_registry.copy() | ||||
|             new_registry.update(overrides) | ||||
|          | ||||
|         self._local.stack.append(new_registry) | ||||
|         try: | ||||
|             yield | ||||
|         finally: | ||||
|             self._local.stack.pop() | ||||
|  | ||||
|     def get_client_for(self, model_name: str) -> Optional[openai.Client]: | ||||
|         current_registry = self._local.stack[-1] if hasattr(self._local, 'stack') and self._local.stack else self.model_registry | ||||
|         client = current_registry.get(model_name) | ||||
|         if client is None: | ||||
|             logger.warning(f"Model '{model_name}' is not registered. Falling back to OpenAI client from environment variables.") | ||||
|         return client | ||||
|  | ||||
|     def reset(self) -> None: | ||||
|         with self._lock: | ||||
|             self.__init__() | ||||
|             if hasattr(self._local, 'stack'): | ||||
|                 del self._local.stack | ||||
|  | ||||
|     def register_serializer(self, serializer: "Serializer") -> None: | ||||
|         self.serializers.add(serializer) | ||||
|  | ||||
| # Singleton instance | ||||
| config = Config() | ||||
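A minimal sketch of driving this Config (not from the commit; the model name, local base_url, and reliance on OPENAI_API_KEY are assumptions):

    import openai
    from ell.configurator import config

    # Register a client; lm-decorated functions resolve clients through this registry.
    client = openai.Client()  # assumes OPENAI_API_KEY is set in the environment
    config.register_model("gpt-4-turbo", client)

    # Temporarily route the model to another client, on this thread only.
    local = openai.Client(base_url="http://localhost:8000/v1", api_key="unused")  # hypothetical endpoint
    with config.model_registry_override({"gpt-4-turbo": local}):
        assert config.get_client_for("gpt-4-turbo") is local
    assert config.get_client_for("gpt-4-turbo") is client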
							
								
								
									
236  ell/src/ell/decorators.py  Normal file
| @@ -0,0 +1,236 @@ | ||||
| """ | ||||
| The core declarative functionality of the ell language model programming library. | ||||
| """ | ||||
|  | ||||
| # This isn't fully accurate: we should enable the user to pass images and other multimodal inputs, but we can address that later. | ||||
| from collections import defaultdict | ||||
| from functools import wraps | ||||
| import hashlib | ||||
| import json | ||||
| import time | ||||
| import ell.util.closure | ||||
| import colorama | ||||
| from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast | ||||
| from ell.configurator import config | ||||
| from ell.lstr import lstr | ||||
| from ell.types import LMP, InvocableLM, LMPParams, Message, MessageOrDict, _lstr_generic | ||||
| from ell.util.verbosity import model_usage_logger_post_end, model_usage_logger_post_intermediate, model_usage_logger_post_start, model_usage_logger_pre, compute_color | ||||
| import numpy as np | ||||
| import openai | ||||
|  | ||||
|  | ||||
| import logging | ||||
| colorama.Style | ||||
|  | ||||
|  | ||||
| logger = logging.getLogger(__name__) | ||||
|  | ||||
|  | ||||
| DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant." | ||||
| DEFAULT_LM_PARAMS: Dict[str, Any] = dict() | ||||
|  | ||||
|  | ||||
| def _get_messages(res: Union[str, list[MessageOrDict]], fn: LMP) -> list[Message]: | ||||
|     """ | ||||
|     Helper function to convert the output of an LMP into a list of Messages. | ||||
|     """ | ||||
|     if isinstance(res, str): | ||||
|         return [ | ||||
|             Message(role="system", content=(fn.__doc__) or DEFAULT_SYSTEM_PROMPT), | ||||
|             Message(role="user", content=res), | ||||
|         ] | ||||
|     else: | ||||
|         assert isinstance( | ||||
|             res, list | ||||
|         ), "Need to pass a list of MessagesOrDict to the language model" | ||||
|         return res | ||||
|  | ||||
|  | ||||
| def _get_lm_kwargs(lm_kwargs: Dict[str, Any], lm_params: LMPParams) -> Dict[str, Any]: | ||||
|     """ | ||||
|     Helper function to combine the default LM parameters with the provided LM parameters and the parameters passed to the LMP. | ||||
|     """ | ||||
|     final_lm_kwargs = dict(**DEFAULT_LM_PARAMS) | ||||
|     final_lm_kwargs.update(**lm_kwargs) | ||||
|     final_lm_kwargs.update(**lm_params) | ||||
|     return final_lm_kwargs | ||||
|  | ||||
|  | ||||
| def _run_lm( | ||||
|     model: str, | ||||
|     messages: list[Message], | ||||
|     lm_kwargs: Dict[str, Any], | ||||
|     client: Optional[openai.Client] = None, | ||||
|     lmp_orginator="NOT IMPLEMENTED", | ||||
|     _logging_color=None, | ||||
| ) -> Union[lstr, Iterable[lstr]]: | ||||
|     """ | ||||
|     Helper function to run the language model with the provided messages and parameters. | ||||
|     """ | ||||
|     # TODO: Decide whether the client specified via the context-manager default registry should take precedence, or the client specified via LMP invocation args should. | ||||
|     client = client or config.get_client_for(model) | ||||
|     if client is None: | ||||
|         raise ValueError(f"No client found for model '{model}'. Ensure the model is registered using 'register_model' in 'config.py' or specify a client directly using the 'client' argument in the decorator or function call.") | ||||
|      | ||||
|     model_result = client.chat.completions.create( | ||||
|         model=model, messages=messages, stream=True, **lm_kwargs | ||||
|     ) | ||||
|  | ||||
|      | ||||
|     choices_progress = defaultdict(list) | ||||
|     n = lm_kwargs.get("n", 1) | ||||
|  | ||||
|     if config.verbose: | ||||
|         model_usage_logger_post_start(_logging_color, n) | ||||
|  | ||||
|     with model_usage_logger_post_intermediate(_logging_color, n) as _logger: | ||||
|         for chunk in model_result: | ||||
|             for choice in chunk.choices: | ||||
|                 # print(choice) | ||||
|                 choices_progress[choice.index].append(choice) | ||||
|                 if config.verbose and choice.index == 0: | ||||
|                     _logger(choice.delta.content) | ||||
|  | ||||
|     if config.verbose: | ||||
|         model_usage_logger_post_end() | ||||
|     n_choices = len(choices_progress) | ||||
|  | ||||
|     tracked_results = [ | ||||
|         lstr( | ||||
|             content="".join((choice.delta.content or "" for choice in choice_deltas)), | ||||
|             # logits=( # | ||||
|             #     np.concatenate([np.array( | ||||
|             #         [c.logprob for c in choice.logprobs.content or []] | ||||
|             #     ) for choice in choice_deltas])  # mypy type hinting is dogshit. | ||||
|             # ), | ||||
|             # Todo: Properly implement log probs. | ||||
|             originator=lmp_originator, | ||||
|         ) | ||||
|         for _, choice_deltas in sorted(choices_progress.items(), key=lambda x: x[0]) | ||||
|     ] | ||||
|  | ||||
|     return tracked_results[0] if n_choices == 1 else tracked_results | ||||
|  | ||||
|  | ||||
|  | ||||
| def lm(model: str, client: Optional[openai.Client] = None, **lm_kwargs): | ||||
|     """ | ||||
|     Defines a basic language model program (a parameterization of an existing foundation model using a particular prompt.) | ||||
|  | ||||
|     This is a decorator that can be applied to any LMP type. | ||||
|     """ | ||||
|     default_client_from_decorator = client  | ||||
|  | ||||
|     def decorator( | ||||
|         fn: LMP, | ||||
|     ) -> InvocableLM: | ||||
|         color = compute_color(fn) | ||||
|  | ||||
|         @wraps(fn) | ||||
|         def wrapper( | ||||
|             *fn_args, | ||||
|             client: Optional[openai.Client] = None, | ||||
|             lm_params: LMPParams = {}, | ||||
|             invocation_kwargs=False, | ||||
|             **fn_kwargs, | ||||
|         ) -> _lstr_generic: | ||||
|             res = fn(*fn_args, **fn_kwargs) | ||||
|              | ||||
|             messages = _get_messages(res, fn) | ||||
|             if config.verbose: model_usage_logger_pre(fn, fn_args, fn_kwargs, "notimplemented", messages, color) | ||||
|             final_lm_kwargs = _get_lm_kwargs(lm_kwargs, lm_params) | ||||
|             _invocation_kwargs = dict(model=model, messages=messages, lm_kwargs=final_lm_kwargs, client=client or default_client_from_decorator, _logging_color=color) | ||||
|             tracked_str = _run_lm(**_invocation_kwargs) | ||||
|             return tracked_str, _invocation_kwargs | ||||
|  | ||||
|         # TODO: # we'll deal with type safety here later | ||||
|         wrapper.__ell_lm_kwargs__ = lm_kwargs | ||||
|         wrapper.__ell_func__ = fn | ||||
|         return track(wrapper) | ||||
|  | ||||
|     return decorator | ||||
|  | ||||
|  | ||||
| def track(fn: Callable) -> Callable: | ||||
|     if hasattr(fn, "__ell_lm_kwargs__"): | ||||
|         func_to_track = fn | ||||
|         lm_kwargs = fn.__ell_lm_kwargs__ | ||||
|         lmp = True | ||||
|     else: | ||||
|         func_to_track = fn | ||||
|         lm_kwargs = None | ||||
|         lmp = False | ||||
|  | ||||
|  | ||||
|     # see if it exists | ||||
|     _name = func_to_track.__qualname__ | ||||
|     _time = time.time() | ||||
|     _has_serialized = False | ||||
|      | ||||
|  | ||||
|     @wraps(fn) | ||||
|     def wrapper(*fn_args, get_invocation=False, **fn_kwargs) -> str: | ||||
|         nonlocal _has_serialized | ||||
|         assert (get_invocation and config.has_serializers) or not get_invocation, "In order to get an invocation, you must have a serializer and get_invocation must be True." | ||||
|         # get the prompt | ||||
|         (result, invocation_kwargs) = ( | ||||
|             (fn(*fn_args, **fn_kwargs), None) | ||||
|             if not lmp | ||||
|             else fn(*fn_args, **fn_kwargs) | ||||
|             ) | ||||
|  | ||||
|         invoc = None  # ensure 'invoc' is bound; otherwise get_invocation=True raises NameError once _has_serialized is set | ||||
|         if config.has_serializers and not _has_serialized: | ||||
|             fn_closure, _uses = ell.util.closure.lexically_closured_source(func_to_track) | ||||
|             fn_hash = func_to_track.__ell_hash__ | ||||
|             # print(fn_hash) | ||||
|              | ||||
|  | ||||
|             for serializer in config.serializers: | ||||
|                 serializer.write_lmp( | ||||
|                     lmp_id=fn_hash, | ||||
|                     name=_name, | ||||
|                     source=fn_closure[0], | ||||
|                     dependencies=fn_closure[1], | ||||
|                     created_at=_time, | ||||
|                     is_lmp=lmp, | ||||
|                     lm_kwargs=( | ||||
|                         json.dumps(lm_kwargs) | ||||
|                         if lm_kwargs | ||||
|                         else None | ||||
|                     ), | ||||
|                     uses=_uses, | ||||
|                 ) | ||||
|             _has_serialized = True | ||||
|  | ||||
|             # Let's add an invocation | ||||
|             invocation_params = dict( | ||||
|                 lmp_id=fn_hash, | ||||
|                 args=json.dumps(fn_args), | ||||
|                 kwargs=json.dumps(fn_kwargs), | ||||
|                 result=json.dumps(result), | ||||
|                 created_at=time.time(), | ||||
|                 invocation_kwargs=invocation_kwargs, | ||||
|             ) | ||||
|              | ||||
|             for serializer in config.serializers: | ||||
|                 serializer.write_invocation(**invocation_params) | ||||
|              | ||||
|             invoc = invocation_params  # For compatibility with existing code | ||||
|  | ||||
|         if get_invocation: | ||||
|             return result, invoc | ||||
|         else: | ||||
|             return result | ||||
|  | ||||
|     fn.__wrapper__ = wrapper | ||||
|     wrapper.__ell_lm_kwargs__ = lm_kwargs | ||||
|     wrapper.__ell_func__ = func_to_track | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|     return wrapper | ||||
|  | ||||
|  | ||||
|  | ||||
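A hedged sketch of the @lm decorator in use (not part of this file; the model name and prompts are illustrative). Per _get_messages above, the docstring becomes the system message and the returned string becomes the user message:

    import ell

    @ell.lm(model="gpt-4-turbo", temperature=0.7)  # assumes this model resolves to a registered client
    def write_a_haiku(topic: str) -> str:
        """You are a poet who only writes haiku."""  # -> system message
        return f"Write a haiku about {topic}."       # -> user message

    # haiku = write_a_haiku("autumn rain")  # returns an lstr whose originator traces this LMP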
							
								
								
									
420  ell/src/ell/lstr.py  Normal file
| @@ -0,0 +1,420 @@ | ||||
| """ | ||||
| LM string that supports logits and keeps track of its originator even after mutation. | ||||
| """ | ||||
|  | ||||
| import numpy as np | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Set, | ||||
|     SupportsIndex, | ||||
|     Union, | ||||
|     FrozenSet, | ||||
|     Iterable, | ||||
|     List, | ||||
|     Tuple, | ||||
|     Any, | ||||
|     Callable, | ||||
|     override, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class lstr(str): | ||||
|     """ | ||||
|     A string class that supports logits and keeps track of its originator even after mutation. | ||||
|     This class is designed to be used in prompt engineering libraries where it is essential to associate | ||||
|     logits with generated text and track the origin of the text. | ||||
|  | ||||
|     The `lstr` class inherits from the built-in `str` class and adds two additional attributes: `logits` and `originator`. | ||||
|     The `logits` attribute is an optional NumPy array that represents the logits associated with the string. | ||||
|     The `originator` attribute is a frozen set of strings that represents the originator(s) of the string. | ||||
|  | ||||
|     The class provides various methods for manipulating the string, such as concatenation, slicing, splitting, and joining. | ||||
|     These methods ensure that the logits and originator(s) are updated correctly based on the operation performed. | ||||
|  | ||||
|     The `lstr` class is particularly useful in LLM libraries for tracing the flow of prompts through various language model calls. | ||||
|     By tracking the originator of each string, it is possible to visualize how outputs from one language model program influence | ||||
|     the inputs of another, allowing for a detailed analysis of interactions between different large language models. This capability | ||||
|     is crucial for understanding the propagation of prompts in complex LLM workflows and for building visual graphs that depict these interactions. | ||||
|  | ||||
|     It is important to note that any modification to the string (such as concatenation or replacement) will invalidate the associated logits. | ||||
|     This is because the logits are specifically tied to the original string content, and any change would require a new computation of logits. | ||||
|     The logic behind this is detailed elsewhere in this file. | ||||
|  | ||||
|     Example usage: | ||||
|     ``` | ||||
|     # Create an lstr instance with logits and an originator | ||||
|     logits = np.array([1.0, 2.0, 3.0]) | ||||
|     originator = "4e9b7ec9" | ||||
|     lstr_instance = lstr("Hello", logits, originator) | ||||
|  | ||||
|     # Concatenate two lstr instances | ||||
|     lstr_instance2 = lstr("World", None, "7f4d2c3a") | ||||
|     concatenated_lstr = lstr_instance + lstr_instance2 | ||||
|  | ||||
|     # Get the logits and originator of the concatenated lstr | ||||
|     print(concatenated_lstr.logits)  # Output: None | ||||
|     print(concatenated_lstr.originator)  # Output: frozenset({'4e9b7ec9', '7f4d2c3a'}) | ||||
|  | ||||
|     # Split the concatenated lstr into two parts | ||||
|     parts = concatenated_lstr.split() | ||||
|     print(parts)  # Output: [lstr('Hello', None, frozenset({'4e9b7ec9', '7f4d2c3a'})), lstr('World', None, frozenset({'4e9b7ec9', '7f4d2c3a'}))] | ||||
|     ``` | ||||
|     Attributes: | ||||
|         logits (Optional[np.ndarray]): The logits associated with the string. | ||||
|         originator (FrozenSet[str]): A frozen set of strings representing the originator(s) of the string. | ||||
|  | ||||
|     Methods: | ||||
|         __new__: Create a new instance of lstr. | ||||
|         __repr__: Return a string representation of the lstr instance. | ||||
|         __add__: Concatenate this lstr instance with another string or lstr instance. | ||||
|         __mod__: Perform a modulo operation between this lstr instance and another string, lstr, or a tuple of strings and lstrs. | ||||
|         __mul__: Perform a multiplication operation between this lstr instance and an integer or another lstr. | ||||
|         __rmul__: Perform a right multiplication operation between an integer or another lstr and this lstr instance. | ||||
|         __getitem__: Get a slice or index of this lstr instance. | ||||
|         __getattr__: Get an attribute from this lstr instance. | ||||
|         join: Join a sequence of strings or lstr instances into a single lstr instance. | ||||
|         split: Split this lstr instance into a list of lstr instances based on a separator. | ||||
|         rsplit: Split this lstr instance into a list of lstr instances based on a separator, starting from the right. | ||||
|         splitlines: Split this lstr instance into a list of lstr instances based on line breaks. | ||||
|         partition: Partition this lstr instance into three lstr instances based on a separator. | ||||
|         rpartition: Partition this lstr instance into three lstr instances based on a separator, starting from the right. | ||||
|     """ | ||||
|  | ||||
|     def __new__( | ||||
|         cls, | ||||
|         content: str, | ||||
|         logits: Optional[np.ndarray] = None, | ||||
|         originator: Optional[Union[str, FrozenSet[str]]] = None, | ||||
|     ): | ||||
|         """ | ||||
|         Create a new instance of lstr. The `logits` should be a numpy array and `originator` should be a frozen set of strings or a single string. | ||||
|  | ||||
|         Args: | ||||
|         content (str): The string content of the lstr. | ||||
|         logits (np.ndarray, optional): The logits associated with this string. Defaults to None. | ||||
|         originator (Union[str, FrozenSet[str]], optional): The originator(s) of this string. Defaults to None. | ||||
|         """ | ||||
|         instance = super(lstr, cls).__new__(cls, content) | ||||
|         instance._logits = logits | ||||
|         if isinstance(originator, str): | ||||
|             instance._originator = frozenset({originator}) | ||||
|         else: | ||||
|             instance._originator = ( | ||||
|                 frozenset(originator) if originator is not None else frozenset() | ||||
|             ) | ||||
|         return instance | ||||
|  | ||||
|     _logits: Optional[np.ndarray] | ||||
|     _originator: FrozenSet[str] | ||||
|  | ||||
|     @property | ||||
|     def logits(self) -> Optional[np.ndarray]: | ||||
|         """ | ||||
|         Get the logits associated with this lstr instance. | ||||
|  | ||||
|         Returns: | ||||
|             Optional[np.ndarray]: The logits associated with this lstr instance, or None if no logits are available. | ||||
|         """ | ||||
|         return self._logits | ||||
|  | ||||
|     @property | ||||
|     def originator(self) -> FrozenSet[str]: | ||||
|         """ | ||||
|         Get the originator(s) of this lstr instance. | ||||
|  | ||||
|         Returns: | ||||
|             FrozenSet[str]: A frozen set of strings representing the originator(s) of this lstr instance. | ||||
|         """ | ||||
|         return self._originator | ||||
|  | ||||
|     ######################## | ||||
|     ## Overriding methods ## | ||||
|     ######################## | ||||
|     def __repr__(self) -> str: | ||||
|         """ | ||||
|         Return a string representation of this lstr instance. | ||||
|  | ||||
|         Returns: | ||||
|             str: The string representation of this lstr instance's content. | ||||
|         """ | ||||
|         return super().__repr__() | ||||
|  | ||||
|     def __add__(self, other: Union[str, "lstr"]) -> "lstr": | ||||
|         """ | ||||
|         Concatenate this lstr instance with another string or lstr instance. | ||||
|  | ||||
|         Args: | ||||
|             other (Union[str, "lstr"]): The string or lstr instance to concatenate with this instance. | ||||
|  | ||||
|         Returns: | ||||
|             lstr: A new lstr instance containing the concatenated content, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         if type(other) is str: | ||||
|             new_content = super(lstr, self).__add__(other) | ||||
|             return lstr(new_content, None, self._originator) | ||||
|         elif isinstance(other, lstr): | ||||
|             new_content = super(lstr, self).__add__(other) | ||||
|             new_originator = self._originator.union(other._originator) | ||||
|             return lstr(new_content, None, new_originator) | ||||
|         return NotImplemented | ||||
|  | ||||
|     def __mod__( | ||||
|         self, other: Union[str, "lstr", Tuple[Union[str, "lstr"], ...]] | ||||
|     ) -> "lstr": | ||||
|         """ | ||||
|         Perform a modulo operation between this lstr instance and another string, lstr, or a tuple of strings and lstrs, | ||||
|         tracing the operation by logging the operands and the result. | ||||
|  | ||||
|         Args: | ||||
|             other (Union[str, "lstr", Tuple[Union[str, "lstr"], ...]]): The right operand in the modulo operation. | ||||
|  | ||||
|         Returns: | ||||
|             lstr: A new lstr instance containing the result of the modulo operation, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         # If 'other' is a tuple, we need to handle each element | ||||
|         if isinstance(other, tuple): | ||||
|             result_content = super(lstr, self).__mod__(tuple(str(o) for o in other)) | ||||
|             new_originators = set(self._originator) | ||||
|             for item in other: | ||||
|                 if isinstance(item, lstr): | ||||
|                     new_originators.update(item._originator) | ||||
|             new_originator = frozenset(new_originators) | ||||
|         else: | ||||
|             result_content = super(lstr, self).__mod__(other) | ||||
|             if isinstance(other, lstr): | ||||
|                 new_originator = self._originator.union(other._originator) | ||||
|             else: | ||||
|                 new_originator = self._originator | ||||
|  | ||||
|         return lstr(result_content, None, new_originator) | ||||
|  | ||||
|     def __mul__(self, other: SupportsIndex) -> "lstr": | ||||
|         """ | ||||
|         Perform a multiplication operation between this lstr instance and an integer or another lstr, | ||||
|         tracing the operation by logging the operands and the result. | ||||
|  | ||||
|         Args: | ||||
|             other (Union[SupportsIndex, "lstr"]): The right operand in the multiplication operation. | ||||
|  | ||||
|         Returns: | ||||
|             lstr: A new lstr instance containing the result of the multiplication operation, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         if isinstance(other, SupportsIndex): | ||||
|             result_content = super(lstr, self).__mul__(other) | ||||
|             new_originator = self._originator | ||||
|         else: | ||||
|             return NotImplemented | ||||
|  | ||||
|         return lstr(result_content, None, new_originator) | ||||
|  | ||||
|     def __rmul__(self, other: SupportsIndex) -> "lstr": | ||||
|         """ | ||||
|         Perform a right multiplication operation between an integer or another lstr and this lstr instance, | ||||
|         tracing the operation by logging the operands and the result. | ||||
|  | ||||
|         Args: | ||||
|             other (Union[SupportsIndex, "lstr"]): The left operand in the multiplication operation. | ||||
|  | ||||
|         Returns: | ||||
|             lstr: A new lstr instance containing the result of the multiplication operation, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         return self.__mul__(other)  # Multiplication is commutative in this context | ||||
|  | ||||
|     def __getitem__(self, key: Union[SupportsIndex, slice]) -> "lstr": | ||||
|         """ | ||||
|         Get a slice or index of this lstr instance. | ||||
|  | ||||
|         Args: | ||||
|             key (Union[SupportsIndex, slice]): The index or slice to retrieve. | ||||
|  | ||||
|         Returns: | ||||
|             lstr: A new lstr instance containing the sliced or indexed content, with the originator(s) preserved. | ||||
|         """ | ||||
|         result = super(lstr, self).__getitem__(key) | ||||
|         # This is a matter of opinion. I believe that when you index into a language model output, you are divorcing the logits of the indexed result from the context which produced them. Therefore, it is not reasonable to directly index into the logits outside of their original context, and so any mutation on the string should invalidate the logits. | ||||
|         # try: | ||||
|         #     logit_subset = self._logits[key] if self._logits else None | ||||
|         # except: | ||||
|         #   logit_subset = None | ||||
|         logit_subset = None | ||||
|         return lstr(result, logit_subset, self._originator) | ||||
|  | ||||
|     def __getattribute__(self, name: str) -> Union[Callable, Any]: | ||||
|         """ | ||||
|         Get an attribute from this lstr instance. | ||||
|  | ||||
|         Args: | ||||
|             name (str): The name of the attribute to retrieve. | ||||
|  | ||||
|         Returns: | ||||
|             Union[Callable, Any]: The requested attribute, which may be a method or a value. | ||||
|         """ | ||||
|         # Get the attribute from the superclass (str) | ||||
|         # First, try to get the attribute from the current class instance | ||||
|  | ||||
|         # Get the attribute using the superclass method | ||||
|         attr = super().__getattribute__(name) | ||||
|  | ||||
|         # Check if the attribute is a callable and not defined in lstr class itself | ||||
|         if callable(attr) and name not in lstr.__dict__: | ||||
|  | ||||
|             def wrapped(*args: Any, **kwargs: Any) -> Any: | ||||
|                 result = attr(*args, **kwargs) | ||||
|                 # If the result is a string, return an lstr instance | ||||
|                 if isinstance(result, str): | ||||
|                     originators = self._originator | ||||
|                     for arg in args: | ||||
|                         if isinstance(arg, lstr): | ||||
|                             originators = originators.union(arg._originator) | ||||
|                     for key, value in kwargs.items(): | ||||
|                         if isinstance(value, lstr): | ||||
|                             originators = originators.union(value._originator) | ||||
|                     return lstr(result, None, originators) | ||||
|  | ||||
|                 return result | ||||
|  | ||||
|             return wrapped | ||||
|         return attr | ||||
|  | ||||
|     @override | ||||
|     def join(self, iterable: Iterable[Union[str, "lstr"]]) -> "lstr": | ||||
|         """ | ||||
|         Join a sequence of strings or lstr instances into a single lstr instance. | ||||
|  | ||||
|         Args: | ||||
|             iterable (Iterable[Union[str, "lstr"]]): The sequence of strings or lstr instances to join. | ||||
|  | ||||
|         Returns: | ||||
|             lstr: A new lstr instance containing the joined content, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         items = list(iterable)  # materialize once; a generator would otherwise be exhausted below | ||||
|         parts = [str(item) for item in items] | ||||
|         new_content = super(lstr, self).join(parts) | ||||
|         new_originator = self._originator | ||||
|         for item in items: | ||||
|             if isinstance(item, lstr): | ||||
|                 new_originator = new_originator.union(item._originator) | ||||
|         return lstr(new_content, None, new_originator) | ||||
|  | ||||
|     @override | ||||
|     def split( | ||||
|         self, sep: Optional[Union[str, "lstr"]] = None, maxsplit: SupportsIndex = -1 | ||||
|     ) -> List["lstr"]: | ||||
|         """ | ||||
|         Split this lstr instance into a list of lstr instances based on a separator. | ||||
|  | ||||
|         Args: | ||||
|             sep (Optional[Union[str, "lstr"]], optional): The separator to split on. Defaults to None. | ||||
|             maxsplit (SupportsIndex, optional): The maximum number of splits to perform. Defaults to -1. | ||||
|  | ||||
|         Returns: | ||||
|             List["lstr"]: A list of lstr instances containing the split content, with the originator(s) preserved. | ||||
|         """ | ||||
|         return self._split_helper(super(lstr, self).split, sep, maxsplit) | ||||
|  | ||||
|     @override | ||||
|     def rsplit( | ||||
|         self, sep: Optional[Union[str, "lstr"]] = None, maxsplit: SupportsIndex = -1 | ||||
|     ) -> List["lstr"]: | ||||
|         """ | ||||
|         Split this lstr instance into a list of lstr instances based on a separator, starting from the right. | ||||
|  | ||||
|         Args: | ||||
|             sep (Optional[Union[str, "lstr"]], optional): The separator to split on. Defaults to None. | ||||
|             maxsplit (SupportsIndex, optional): The maximum number of splits to perform. Defaults to -1. | ||||
|  | ||||
|         Returns: | ||||
|             List["lstr"]: A list of lstr instances containing the split content, with the originator(s) preserved. | ||||
|         """ | ||||
|         return self._split_helper(super(lstr, self).rsplit, sep, maxsplit) | ||||
|  | ||||
|     @override | ||||
|     def splitlines(self, keepends: bool = False) -> List["lstr"]: | ||||
|         """ | ||||
|         Split this lstr instance into a list of lstr instances based on line breaks. | ||||
|  | ||||
|         Args: | ||||
|             keepends (bool, optional): Whether to include the line breaks in the resulting lstr instances. Defaults to False. | ||||
|  | ||||
|         Returns: | ||||
|             List["lstr"]: A list of lstr instances containing the split content, with the originator(s) preserved. | ||||
|         """ | ||||
|         return [ | ||||
|             lstr(p, None, self._originator) | ||||
|             for p in super(lstr, self).splitlines(keepends=keepends) | ||||
|         ] | ||||
|  | ||||
|     @override | ||||
|     def partition(self, sep: Union[str, "lstr"]) -> Tuple["lstr", "lstr", "lstr"]: | ||||
|         """ | ||||
|         Partition this lstr instance into three lstr instances based on a separator. | ||||
|  | ||||
|         Args: | ||||
|             sep (Union[str, "lstr"]): The separator to partition on. | ||||
|  | ||||
|         Returns: | ||||
|             Tuple["lstr", "lstr", "lstr"]: A tuple of three lstr instances containing the content before the separator, the separator itself, and the content after the separator, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         return self._partition_helper(super(lstr, self).partition, sep) | ||||
|  | ||||
|     @override | ||||
|     def rpartition(self, sep: Union[str, "lstr"]) -> Tuple["lstr", "lstr", "lstr"]: | ||||
|         """ | ||||
|         Partition this lstr instance into three lstr instances based on a separator, starting from the right. | ||||
|  | ||||
|         Args: | ||||
|             sep (Union[str, "lstr"]): The separator to partition on. | ||||
|  | ||||
|         Returns: | ||||
|             Tuple["lstr", "lstr", "lstr"]: A tuple of three lstr instances containing the content before the separator, the separator itself, and the content after the separator, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         return self._partition_helper(super(lstr, self).rpartition, sep) | ||||
|  | ||||
|     def _partition_helper( | ||||
|         self, method, sep: Union[str, "lstr"] | ||||
|     ) -> Tuple["lstr", "lstr", "lstr"]: | ||||
|         """ | ||||
|         Helper method for partitioning this lstr instance based on a separator. | ||||
|  | ||||
|         Args: | ||||
|             method (Callable): The partitioning method to use (either partition or rpartition). | ||||
|             sep (Union[str, "lstr"]): The separator to partition on. | ||||
|  | ||||
|         Returns: | ||||
|             Tuple["lstr", "lstr", "lstr"]: A tuple of three lstr instances containing the content before the separator, the separator itself, and the content after the separator, with the originator(s) updated accordingly. | ||||
|         """ | ||||
|         part1, part2, part3 = method(sep) | ||||
|         new_originator = ( | ||||
|             self._originator | sep._originator | ||||
|             if isinstance(sep, lstr) | ||||
|             else self._originator | ||||
|         ) | ||||
|         return ( | ||||
|             lstr(part1, None, new_originator), | ||||
|             lstr(part2, None, new_originator), | ||||
|             lstr(part3, None, new_originator), | ||||
|         ) | ||||
|  | ||||
|     def _split_helper( | ||||
|         self, | ||||
|         method, | ||||
|         sep: Optional[Union[str, "lstr"]] = None, | ||||
|         maxsplit: SupportsIndex = -1, | ||||
|     ) -> List["lstr"]: | ||||
|         """ | ||||
|         Helper method for splitting this lstr instance based on a separator. | ||||
|  | ||||
|         Args: | ||||
|             method (Callable): The splitting method to use (either split or rsplit). | ||||
|             sep (Optional[Union[str, "lstr"]], optional): The separator to split on. Defaults to None. | ||||
|             maxsplit (SupportsIndex, optional): The maximum number of splits to perform. Defaults to -1. | ||||
|  | ||||
|         Returns: | ||||
|             List["lstr"]: A list of lstr instances containing the split content, with the originator(s) preserved. | ||||
|         """ | ||||
|         originators = ( | ||||
|             self._originator | sep._originator | ||||
|             if isinstance(sep, lstr) | ||||
|             else self._originator | ||||
|         ) | ||||
|         parts = method(sep, maxsplit) | ||||
|         return [lstr(part, None, originators) for part in parts] | ||||
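A quick sketch of how originator propagation behaves under these overrides. This assumes the lstr constructor accepts (content, logits, originator) with an originator set, as every call above does; the originator ids below are hypothetical:

    from ell.lstr import lstr

    a = lstr("Hello, world", None, frozenset({"lmp-a"}))   # hypothetical originator ids
    b = lstr("Goodbye", None, frozenset({"lmp-b"}))

    # Slicing drops any logits but keeps the originator.
    assert a[:5]._originator == frozenset({"lmp-a"})

    # join() unions the originators of the separator and every lstr item.
    sep = lstr(", ", None, frozenset({"lmp-sep"}))
    joined = sep.join([a, b])
    assert joined._originator == frozenset({"lmp-sep", "lmp-a", "lmp-b"})

    # split() hands the (unioned) originator down to every part.
    assert all(p._originator == joined._originator for p in joined.split(", "))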
ell/src/ell/models/__init__.py (1 line, new file)
							| @@ -0,0 +1 @@ | ||||
| import ell.models.openai | ||||
ell/src/ell/models/openai.py (19 lines, new file)
							| @@ -0,0 +1,19 @@ | ||||
| from ell.configurator import config | ||||
| import openai | ||||
|  | ||||
| import logging | ||||
|  | ||||
| logger = logging.getLogger(__name__) | ||||
|  | ||||
| def register_openai_models(client: openai.Client): | ||||
|     config.register_model("gpt-4o", client) | ||||
|     config.register_model("gpt-3.5-turbo", client) | ||||
|     config.register_model("gpt-4-turbo", client) | ||||
|  | ||||
|  | ||||
|  | ||||
| try: | ||||
|     default_client = openai.Client() | ||||
|     register_openai_models(default_client) | ||||
| except Exception as e: | ||||
|     logger.info(f"Error registering default OpenAI models: {e}. This is likely because you don't have an OpenAI key set. You need to reregister the models with a new client.") | ||||
ell/src/ell/serializer.py (38 lines, new file)
							| @@ -0,0 +1,38 @@ | ||||
| from abc import ABC, abstractmethod | ||||
| from typing import Any, Optional, Dict, List | ||||
|  | ||||
| class Serializer(ABC): | ||||
|     @abstractmethod | ||||
|     def write_lmp(self, lmp_id: str, name: str, source: str, dependencies: List[str],  | ||||
|                   created_at: float, is_lmp: bool, lm_kwargs: Optional[str],  | ||||
|                   uses: Dict[str, Any]) -> Optional[Any]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def write_invocation(self, lmp_id: str, args: str, kwargs: str, result: str,  | ||||
|                          created_at: float, invocation_kwargs: Dict[str, Any]) -> Optional[Any]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def get_lmps(self, filters: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def get_invocations(self, lmp_id: str, filters: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def search_lmps(self, query: str) -> List[Dict[str, Any]]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def search_invocations(self, query: str) -> List[Dict[str, Any]]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def get_lmp_versions(self, lmp_id: str) -> List[Dict[str, Any]]: | ||||
|         pass | ||||
|  | ||||
|     @abstractmethod | ||||
|     def get_latest_lmps(self) -> List[Dict[str, Any]]: | ||||
|         pass | ||||
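To make the contract concrete, here is a minimal in-memory implementation sketch. It is not part of the library (persistent backends such as the filesystem serializer below are the intended implementations), and the search methods use a naive substring match:

    from typing import Any, Dict, List, Optional

    from ell.serializer import Serializer

    class InMemorySerializer(Serializer):
        def __init__(self):
            self.lmps: List[Dict[str, Any]] = []
            self.invocations: Dict[str, List[Dict[str, Any]]] = {}

        def write_lmp(self, lmp_id, name, source, dependencies, created_at,
                      is_lmp, lm_kwargs, uses):
            self.lmps.append(dict(lmp_id=lmp_id, name=name, source=source,
                                  dependencies=dependencies, created_at=created_at,
                                  is_lmp=is_lmp, lm_kwargs=lm_kwargs, uses=uses))

        def write_invocation(self, lmp_id, args, kwargs, result, created_at,
                             invocation_kwargs):
            self.invocations.setdefault(lmp_id, []).append(
                dict(lmp_id=lmp_id, args=args, kwargs=kwargs, result=result,
                     created_at=created_at, invocation_kwargs=invocation_kwargs))

        def get_lmps(self, filters=None):
            return [l for l in self.lmps
                    if not filters or all(l.get(k) == v for k, v in filters.items())]

        def get_invocations(self, lmp_id, filters=None):
            return [i for i in self.invocations.get(lmp_id, [])
                    if not filters or all(i.get(k) == v for k, v in filters.items())]

        def search_lmps(self, query):
            return [l for l in self.lmps if query.lower() in str(l).lower()]

        def search_invocations(self, query):
            return [i for invs in self.invocations.values() for i in invs
                    if query.lower() in str(i).lower()]

        def get_lmp_versions(self, lmp_id):
            names = {l["name"] for l in self.lmps if l["lmp_id"] == lmp_id}
            return sorted((l for l in self.lmps if l["name"] in names),
                          key=lambda l: l["created_at"], reverse=True)

        def get_latest_lmps(self):
            latest: Dict[str, Dict[str, Any]] = {}
            for l in self.lmps:
                if l["name"] not in latest or l["created_at"] > latest[l["name"]]["created_at"]:
                    latest[l["name"]] = l
            return sorted(latest.values(), key=lambda l: l["name"])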
ell/src/ell/serializers/__init__.py (0 lines, new file)

ell/src/ell/serializers/filesystem.py (192 lines, new file)
							| @@ -0,0 +1,192 @@ | ||||
| import os | ||||
| import json | ||||
| from typing import Any, Optional, Dict, List | ||||
| import ell.serializer | ||||
| import numpy as np | ||||
| import glob | ||||
| from operator import itemgetter | ||||
|  | ||||
| class CustomJSONEncoder(json.JSONEncoder): | ||||
|     """JSON encoder that degrades gracefully for objects json can't handle natively.""" | ||||
|     def default(self, obj): | ||||
|         if hasattr(obj, "to_json"): | ||||
|             return obj.to_json() | ||||
|         # str and its subclasses (e.g. lstr) are serialized natively, so no str branch is needed here. | ||||
|         if isinstance(obj, np.ndarray): | ||||
|             return obj.tolist() | ||||
|  | ||||
|         try: | ||||
|             return super().default(obj) | ||||
|         except TypeError: | ||||
|             # Fall back to the string representation rather than failing the whole write. | ||||
|             return str(obj) | ||||
|  | ||||
|  | ||||
| class FilesystemSerializer(ell.serializer.Serializer): | ||||
|     def __init__(self, storage_dir: str, max_file_size: int = 1024 * 1024):  # 1MB default | ||||
|         self.storage_dir = storage_dir | ||||
|         self.max_file_size = max_file_size | ||||
|         os.makedirs(storage_dir, exist_ok=True) | ||||
|         self.open_files = {} | ||||
|  | ||||
|     def write_lmp(self, lmp_id: str, name: str, source: str, dependencies: List[str],  | ||||
|                   created_at: float, is_lmp: bool, lm_kwargs: Optional[str],  | ||||
|                   uses: Dict[str, Any]) -> Optional[Any]: | ||||
|         """ | ||||
|         Write the LMP (Language Model Program) to a JSON file. | ||||
|         """ | ||||
|         file_path = os.path.join(self.storage_dir, 'programs', f"{name}_{lmp_id}.json") | ||||
|         os.makedirs(os.path.dirname(file_path), exist_ok=True) | ||||
|  | ||||
|         lmp_data = { | ||||
|             'lmp_id': lmp_id, | ||||
|             'name': name, | ||||
|             'source': source, | ||||
|             'dependencies': dependencies, | ||||
|             'created_at': created_at, | ||||
|             'is_lmp': is_lmp, | ||||
|             'lm_kwargs': lm_kwargs, | ||||
|             'uses': uses | ||||
|         } | ||||
|  | ||||
|         with open(file_path, 'w') as f: | ||||
|             json.dump(lmp_data, f, cls=CustomJSONEncoder) | ||||
|  | ||||
|         return None | ||||
|  | ||||
|     def write_invocation(self, lmp_id: str, args: str, kwargs: str, result: str,  | ||||
|                          created_at: float, invocation_kwargs: Dict[str, Any]) -> Optional[Any]: | ||||
|         """ | ||||
|         Write an LMP invocation to a JSONL file in a nested folder structure. | ||||
|         """ | ||||
|         dir_path = os.path.join(self.storage_dir, 'invocations', lmp_id[:2], lmp_id[2:4], lmp_id[4:]) | ||||
|         os.makedirs(dir_path, exist_ok=True) | ||||
|  | ||||
|         if lmp_id not in self.open_files: | ||||
|             index = 0 | ||||
|             while True: | ||||
|                 file_path = os.path.join(dir_path, f"invocations.{index}.jsonl") | ||||
|                 if not os.path.exists(file_path) or os.path.getsize(file_path) < self.max_file_size: | ||||
|                     self.open_files[lmp_id] = {'file': open(file_path, 'a'), 'path': file_path} | ||||
|                     break | ||||
|                 index += 1 | ||||
|  | ||||
|         invocation_data = { | ||||
|             'lmp_id': lmp_id, | ||||
|             'args': args, | ||||
|             'kwargs': kwargs, | ||||
|             'result': result, | ||||
|             'invocation_kwargs': invocation_kwargs, | ||||
|             'created_at': created_at | ||||
|         } | ||||
|  | ||||
|         file_info = self.open_files[lmp_id] | ||||
|         file_info['file'].write(json.dumps(invocation_data, cls=CustomJSONEncoder) + '\n') | ||||
|         file_info['file'].flush() | ||||
|  | ||||
|         if os.path.getsize(file_info['path']) >= self.max_file_size: | ||||
|             file_info['file'].close() | ||||
|             del self.open_files[lmp_id] | ||||
|          | ||||
|         return invocation_data | ||||
|  | ||||
|     def get_lmps(self, filters: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: | ||||
|         lmps = [] | ||||
|         for file_path in glob.glob(os.path.join(self.storage_dir, 'programs', '*.json')): | ||||
|             with open(file_path, 'r') as f: | ||||
|                 lmp = json.load(f) | ||||
|                 if filters: | ||||
|                     if all(lmp.get(k) == v for k, v in filters.items()): | ||||
|                         lmps.append(lmp) | ||||
|                 else: | ||||
|                     lmps.append(lmp) | ||||
|         return lmps | ||||
|  | ||||
|     def get_invocations(self, lmp_id: str, filters: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: | ||||
|         invocations = [] | ||||
|         dir_path = os.path.join(self.storage_dir, 'invocations', lmp_id[:2], lmp_id[2:4], lmp_id[4:]) | ||||
|         for file_path in glob.glob(os.path.join(dir_path, 'invocations.*.jsonl')): | ||||
|             with open(file_path, 'r') as f: | ||||
|                 for line in f: | ||||
|                     invocation = json.loads(line) | ||||
|                     if filters: | ||||
|                         if all(invocation.get(k) == v for k, v in filters.items()): | ||||
|                             invocations.append(invocation) | ||||
|                     else: | ||||
|                         invocations.append(invocation) | ||||
|         return invocations | ||||
|  | ||||
|     def get_lmp(self, lmp_id: str) -> Optional[Dict[str, Any]]: | ||||
|         """ | ||||
|         Get a specific LMP by its ID. | ||||
|         """ | ||||
|         for file_path in glob.glob(os.path.join(self.storage_dir, 'programs', '*.json')): | ||||
|             with open(file_path, 'r') as f: | ||||
|                 lmp = json.load(f) | ||||
|                 if lmp.get('lmp_id') == lmp_id: | ||||
|                     return lmp | ||||
|         return None | ||||
|  | ||||
|     def search_lmps(self, query: str) -> List[Dict[str, Any]]: | ||||
|         lmps = [] | ||||
|         for file_path in glob.glob(os.path.join(self.storage_dir, 'programs', '*.json')): | ||||
|             with open(file_path, 'r') as f: | ||||
|                 lmp = json.load(f) | ||||
|                 if query.lower() in json.dumps(lmp).lower(): | ||||
|                     lmps.append(lmp) | ||||
|         return lmps | ||||
|  | ||||
|     def search_invocations(self, query: str) -> List[Dict[str, Any]]: | ||||
|         invocations = [] | ||||
|         for dir_path in glob.glob(os.path.join(self.storage_dir, 'invocations', '*', '*', '*')): | ||||
|             for file_path in glob.glob(os.path.join(dir_path, 'invocations.*.jsonl')): | ||||
|                 with open(file_path, 'r') as f: | ||||
|                     for line in f: | ||||
|                         invocation = json.loads(line) | ||||
|                         if query.lower() in json.dumps(invocation).lower(): | ||||
|                             invocations.append(invocation) | ||||
|         return invocations | ||||
|  | ||||
|     def get_lmp_versions(self, lmp_id: str) -> List[Dict[str, Any]]: | ||||
|         """ | ||||
|         Get all versions of an LMP with the given lmp_id. | ||||
|         """ | ||||
|         target_lmp = self.get_lmp(lmp_id) | ||||
|         if not target_lmp: | ||||
|             return [] | ||||
|  | ||||
|         versions = [] | ||||
|         for file_path in glob.glob(os.path.join(self.storage_dir, 'programs', f"{target_lmp['name']}_*.json")): | ||||
|             with open(file_path, 'r') as f: | ||||
|                 lmp = json.load(f) | ||||
|                 versions.append(lmp) | ||||
|  | ||||
|         # Sort versions by created_at timestamp, newest first | ||||
|         return sorted(versions, key=lambda x: x['created_at'], reverse=True) | ||||
|  | ||||
|     def get_latest_lmps(self) -> List[Dict[str, Any]]: | ||||
|         """ | ||||
|         Get the latest version of each unique LMP. | ||||
|         """ | ||||
|         lmps_by_name = {} | ||||
|         for file_path in glob.glob(os.path.join(self.storage_dir, 'programs', '*.json')): | ||||
|             with open(file_path, 'r') as f: | ||||
|                 lmp = json.load(f) | ||||
|                 name = lmp['name'] | ||||
|                 if name not in lmps_by_name or lmp['created_at'] > lmps_by_name[name]['created_at']: | ||||
|                     lmps_by_name[name] = lmp | ||||
|  | ||||
|         # Return the list of latest LMPs, sorted by name | ||||
|         return sorted(lmps_by_name.values(), key=itemgetter('name')) | ||||
|  | ||||
|     def __del__(self): | ||||
|         """ | ||||
|         Close all open files when the serializer is destroyed. | ||||
|         """ | ||||
|         for file_info in self.open_files.values(): | ||||
|             file_info['file'].close() | ||||
|  | ||||
|     def install(self): | ||||
|         """ | ||||
|         Install the serializer into all invocations of the ell wrapper. | ||||
|         """ | ||||
|         ell.config.register_serializer(self) | ||||
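End-to-end usage sketch against a scratch directory (the lmp_id, payloads, and paths below are made up):

    import time

    from ell.serializers.filesystem import FilesystemSerializer

    s = FilesystemSerializer("./ell_storage")
    s.write_lmp(lmp_id="abc123", name="hello", source="def hello(): ...",
                dependencies=[], created_at=time.time(), is_lmp=True,
                lm_kwargs=None, uses={})
    s.write_invocation(lmp_id="abc123", args="[]", kwargs="{}", result="hi",
                       created_at=time.time(), invocation_kwargs={})

    print(s.get_latest_lmps())          # newest version of each LMP, sorted by name
    print(s.get_invocations("abc123"))  # read back from invocations/ab/c1/23/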
ell/src/ell/studio/__main__.py (18 lines, new file)
							| @@ -0,0 +1,18 @@ | ||||
| import os | ||||
| from argparse import ArgumentParser | ||||
| from ell.studio.data_server import app, serializer | ||||
|  | ||||
| def main(): | ||||
|     parser = ArgumentParser(description="ELL Studio Data Server") | ||||
|     parser.add_argument("--storage-dir", default=os.getcwd(), | ||||
|                         help="Directory for filesystem serializer storage (default: current directory)") | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     # Update the serializer's storage directory | ||||
|     serializer.storage_dir = args.storage_dir | ||||
|  | ||||
|     # Run the Flask app | ||||
|     app.run(debug=True) | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     main() | ||||
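Usage note: the server is started with "python -m ell.studio --storage-dir ./ell_storage" and listens on Flask's default development address. One caveat worth flagging: reassigning serializer.storage_dir here happens after the serializer's __init__ has already run, so the os.makedirs call was applied to the import-time default directory rather than the one passed on the command line (writes still create their own subdirectories on demand).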
ell/src/ell/studio/data_server.py (56 lines, new file)
							| @@ -0,0 +1,56 @@ | ||||
| from flask import Flask, request, jsonify | ||||
| from flask_cors import CORS | ||||
| from ell.serializers.filesystem import FilesystemSerializer | ||||
| import os | ||||
|  | ||||
| app = Flask(__name__) | ||||
| CORS(app, resources={r"/api/*": {"origins": "*"}})  # Enable CORS for all /api routes | ||||
| serializer = FilesystemSerializer(os.environ.get('ELL_STORAGE_DIR', os.getcwd())) | ||||
|  | ||||
| @app.route('/api/lmps', methods=['GET']) | ||||
| def get_lmps(): | ||||
|     filters = request.args.to_dict() | ||||
|     lmps = serializer.get_lmps(filters) | ||||
|     return jsonify(lmps) | ||||
|  | ||||
| @app.route('/api/lmps/search', methods=['GET']) | ||||
| def search_lmps(): | ||||
|     query = request.args.get('q', '') | ||||
|     lmps = serializer.search_lmps(query) | ||||
|     return jsonify(lmps) | ||||
|  | ||||
| @app.route('/api/lmps/<lmp_id>', methods=['GET']) | ||||
| def get_lmp(lmp_id): | ||||
|     lmp = serializer.get_lmp(lmp_id) | ||||
|     if lmp: | ||||
|         return jsonify(lmp) | ||||
|     else: | ||||
|         return jsonify({"error": "LMP not found"}), 404 | ||||
|  | ||||
| @app.route('/api/invocations/<lmp_id>', methods=['GET']) | ||||
| def get_invocations(lmp_id): | ||||
|     filters = request.args.to_dict() | ||||
|     invocations = serializer.get_invocations(lmp_id, filters) | ||||
|     return jsonify(invocations) | ||||
|  | ||||
| @app.route('/api/invocations/search', methods=['GET']) | ||||
| def search_invocations(): | ||||
|     query = request.args.get('q', '') | ||||
|     invocations = serializer.search_invocations(query) | ||||
|     return jsonify(invocations) | ||||
|  | ||||
| @app.route('/api/lmps/<lmp_id>/versions', methods=['GET']) | ||||
| def get_lmp_versions(lmp_id): | ||||
|     versions = serializer.get_lmp_versions(lmp_id) | ||||
|     if versions: | ||||
|         return jsonify(versions) | ||||
|     else: | ||||
|         return jsonify({"error": "LMP versions not found"}), 404 | ||||
|  | ||||
| @app.route('/api/lmps/latest', methods=['GET']) | ||||
| def get_latest_lmps(): | ||||
|     latest_lmps = serializer.get_latest_lmps() | ||||
|     return jsonify(latest_lmps) | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     app.run(debug=True) | ||||
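A client-side sketch against the routes above, assuming the Flask development server's default address (http://127.0.0.1:5000) and a hypothetical lmp_id:

    import requests

    base = "http://127.0.0.1:5000/api"
    print(requests.get(f"{base}/lmps/latest").json())
    print(requests.get(f"{base}/lmps/search", params={"q": "hello"}).json())
    print(requests.get(f"{base}/invocations/abc123").json())  # abc123 is hypothetical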
ell/src/ell/types.py (41 lines, new file)
							| @@ -0,0 +1,41 @@ | ||||
| # Let's define the core types. | ||||
| from dataclasses import dataclass | ||||
| from typing import Callable, Dict, List, Union | ||||
|  | ||||
| from typing import Any | ||||
| from ell.lstr import lstr | ||||
| from ell.util.dict_sync_meta import DictSyncMeta | ||||
|  | ||||
| _lstr_generic = Union[lstr, str] | ||||
|  | ||||
| OneTurn = Callable[..., _lstr_generic] | ||||
|  | ||||
| # We want to enable a use case where the user can return a standard OAI chat format. | ||||
| # This is a placeholder; we will likely come back to this later. | ||||
| LMPParams = Dict[str, Any] | ||||
|  | ||||
|  | ||||
| @dataclass | ||||
| class Message(dict, metaclass=DictSyncMeta): | ||||
|     role: str | ||||
|     content: _lstr_generic | ||||
|  | ||||
|  | ||||
| # Well, this is disappointing. I wanted to effectively type hint by doing that dict sync meta, but eh, at least we can still reference role or content this way. We will probably scrap the dict sync meta. | ||||
| MessageOrDict = Union[Message, Dict[str, str]] | ||||
|  | ||||
| # Can support image prompts later. | ||||
| Chat = List[ | ||||
|     Message | ||||
| ]  # [{"role": "system", "content": "prompt"}, {"role": "user", "content": "message"}] | ||||
|  | ||||
| MultiTurnLMP = Callable[..., Chat] | ||||
| from typing import TypeVar | ||||
|  | ||||
| # This is the specific LMP that must accept history as an argument and can take any additional arguments | ||||
| T = TypeVar("T", bound=Any) | ||||
| ChatLMP = Callable[[Chat, T], Chat] | ||||
|  | ||||
| LMP = Union[OneTurn, MultiTurnLMP, ChatLMP] | ||||
|  | ||||
| InvocableLM = Callable[..., _lstr_generic] | ||||
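Illustration of the aliases, with plain functions standing in for decorated LMPs (nothing here is wired into the runtime; it only shows the expected shapes):

    from ell.types import Chat, Message

    def summarize(text: str) -> str:                 # OneTurn: ... -> str/lstr
        return f"Summary of: {text[:20]}"

    def brainstorm(topic: str) -> Chat:              # MultiTurnLMP: ... -> Chat
        return [Message(role="system", content="You brainstorm."),
                Message(role="user", content=topic)]

    def reply(history: Chat, style: str) -> Chat:    # ChatLMP: (Chat, T) -> Chat
        return history + [Message(role="assistant", content=f"({style}) ok")]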
ell/src/ell/util/__init__.py (0 lines, new file)

ell/src/ell/util/closure.py (365 lines, new file)
							| @@ -0,0 +1,365 @@ | ||||
| """ | ||||
| This should do the following. | ||||
| # prompt_consts.py | ||||
| import math | ||||
| def test(): | ||||
|     return math.sin(10) | ||||
|  | ||||
| # lol3.py | ||||
| import prompt_consts | ||||
|  | ||||
| X = 7 | ||||
| def xD(): | ||||
|     print(X) | ||||
|     return prompt_consts.test() | ||||
|  | ||||
| ### | ||||
| Our goal is to use AST & dill to get the full lexically closured source of xD, excluding modules that live in site-packages. For example: | ||||
|  | ||||
| lexical_extraction(xD) returns | ||||
| #closure.py | ||||
| import math | ||||
| def test(): | ||||
|     return math.sin(10) | ||||
|  | ||||
| X = 7  | ||||
| def xD(): | ||||
|     print(X) | ||||
|     return test() | ||||
|  | ||||
| """ | ||||
| import ast | ||||
| import collections | ||||
| import hashlib | ||||
| import importlib.util | ||||
| import inspect | ||||
| import os | ||||
| import re | ||||
| import types | ||||
| from collections import deque | ||||
| from typing import Any, Dict, Set, Tuple | ||||
|  | ||||
| import dill | ||||
| import dill.source | ||||
| from dill.source import getsource | ||||
|  | ||||
| DELIM = "$$$$$$$$$$$$$$$$$$$$$$$$$" | ||||
| SEPERATOR = "#------------------------" | ||||
| FORBIDDEN_NAMES = ["ell", "Ell", "lstr"] | ||||
|  | ||||
|  | ||||
| def should_import(module: types.ModuleType): | ||||
|     """ | ||||
|     This function checks if a module should be imported based on its origin. | ||||
|     It returns False if the module is in the local directory or if the module's spec is None. | ||||
|     Otherwise, it returns True. | ||||
|  | ||||
|     Parameters: | ||||
|     module (ModuleType): The module to check. | ||||
|  | ||||
|     Returns: | ||||
|     bool: True if the module should be imported, False otherwise. | ||||
|     """ | ||||
|     # Define the local directory | ||||
|     DIRECTORY_TO_WATCH = os.environ.get("DIRECTORY_TO_WATCH", os.getcwd()) | ||||
|  | ||||
|     # Get the module's spec | ||||
|     spec = importlib.util.find_spec(module.__name__) | ||||
|  | ||||
|     # Return False if the spec (or its origin) is unavailable, or if the origin is inside the local directory | ||||
|     if spec is None or spec.origin is None or spec.origin.startswith(DIRECTORY_TO_WATCH): | ||||
|         return False | ||||
|  | ||||
|     # Otherwise, return True | ||||
|     return True | ||||
|  | ||||
|  | ||||
| def get_referenced_names(code: str, module_name: str): | ||||
|     """ | ||||
|     This function takes a block of code and a module name as input. It parses the code into an Abstract Syntax Tree (AST) | ||||
|     and walks through the tree to find all instances where an attribute of the module is referenced in the code. | ||||
|  | ||||
|     Parameters: | ||||
|     code (str): The block of code to be parsed. | ||||
|     module_name (str): The name of the module to look for in the code. | ||||
|  | ||||
|     Returns: | ||||
|     list: A list of all attributes of the module that are referenced in the code. | ||||
|     """ | ||||
|     tree = ast.parse(code) | ||||
|     referenced_names = [] | ||||
|  | ||||
|     for node in ast.walk(tree): | ||||
|         if isinstance(node, ast.Attribute): | ||||
|             if isinstance(node.value, ast.Name) and node.value.id == module_name: | ||||
|                 referenced_names.append(node.attr) | ||||
|  | ||||
|     return referenced_names | ||||
|  | ||||
|  | ||||
| CLOSURE_SOURCE: Dict[str, str] = {} | ||||
|  | ||||
|  | ||||
| def lexical_closure(func: Any, already_closed=None) -> Tuple[str, Tuple[str, str], Set[str]]: | ||||
|     """ | ||||
|     This function takes a function or any callable as input and returns a string representation of its lexical closure. | ||||
|     The lexical closure includes the source code of the function itself, as well as the source code of any global variables, | ||||
|     free variables, and dependencies (other functions or classes) that it references. | ||||
|  | ||||
|     If the input function is a method of a class, this function will also find and include the source code of other methods | ||||
|     in the same class that are referenced by the input function. | ||||
|  | ||||
|     The resulting string can be used to recreate the function in a different context, with all of its dependencies. | ||||
|  | ||||
|     Parameters: | ||||
|     func (Callable): The function or callable whose lexical closure is to be found. | ||||
|  | ||||
|     Returns: | ||||
|     Tuple[str, Tuple[str, str], Set[str]]: The raw closure source, a (function source, cleaned closure source) pair, and the set of dependency hashes this function uses. | ||||
|     """ | ||||
|  | ||||
|     already_closed = already_closed or set() | ||||
|     uses = set() | ||||
|  | ||||
|     if hash(func) in already_closed: | ||||
|         return "", ("", ""), {} | ||||
|  | ||||
|  | ||||
|     outer_ell_func = func | ||||
|     is_ell_func = hasattr(outer_ell_func, "__ell_func__") | ||||
|     while hasattr(func, "__ell_func__"): | ||||
|         func = func.__ell_func__ | ||||
|  | ||||
|     source = getsource(func, lstrip=True) | ||||
|     already_closed.add(hash(func)) | ||||
|     # if func is nested func | ||||
|     # Parse the source code into an AST | ||||
|     # tree = ast.parse(source) | ||||
|     # Find all the global variables and free variables in the function | ||||
|     global_vars = collections.OrderedDict(dill.detect.globalvars(func)) | ||||
|     free_vars = collections.OrderedDict(dill.detect.freevars(func)) | ||||
|  | ||||
|     # If func is a class we actually should check all the methods of the class for globalvars. Malekdiction (MSM) was here. | ||||
|     # Add the default parameter types to dependencies if they are not builtins. | ||||
|  | ||||
|     if isinstance(func, type): | ||||
|         # Now we need to get all the global vars in the class | ||||
|         for name, method in collections.OrderedDict(func.__dict__).items(): | ||||
|             if isinstance(method, types.FunctionType) or isinstance( | ||||
|                 method, types.MethodType | ||||
|             ): | ||||
|                 global_vars.update( | ||||
|                     collections.OrderedDict(dill.detect.globalvars(method)) | ||||
|                 ) | ||||
|                 free_vars.update(collections.OrderedDict(dill.detect.freevars(method))) | ||||
|  | ||||
|     # Initialize a list to store the source code of the dependencies | ||||
|     dependencies = [] | ||||
|     modules = deque() | ||||
|     imports = [] | ||||
|  | ||||
|     if isinstance(func, (types.FunctionType, types.MethodType)): | ||||
|         # Get all the default kwargs | ||||
|         ps = inspect.signature(func).parameters | ||||
|         default_kwargs = collections.OrderedDict( | ||||
|             { | ||||
|                 k: v.default | ||||
|                 for k, v in ps.items() | ||||
|                 if v.default is not inspect.Parameter.empty | ||||
|             } | ||||
|         ) | ||||
|         for name, val in default_kwargs.items(): | ||||
|             try: | ||||
|                 if name not in FORBIDDEN_NAMES: | ||||
|                     dep, _,  dep_uses = lexical_closure( | ||||
|                         type(val), already_closed=already_closed, | ||||
|                     ) | ||||
|                     dependencies.append(dep) | ||||
|                     uses.update(dep_uses) | ||||
|  | ||||
|             except (IOError, TypeError): | ||||
|                 # If it's a builtin we can just ignore it | ||||
|                 pass | ||||
|  | ||||
|     # Iterate over the global variables | ||||
|     for var_name, var_value in global_vars.items(): | ||||
|         # If the variable is a function, get its source code | ||||
|         if isinstance(var_value, (types.FunctionType, type, types.MethodType)): | ||||
|             if var_name not in FORBIDDEN_NAMES: | ||||
|                 ret = lexical_closure( | ||||
|                     var_value, already_closed=already_closed, | ||||
|                 ) | ||||
|                 dep, _, dep_uses = ret | ||||
|                 dependencies.append(dep) | ||||
|                 # See if the function was called at all in the source code of the func | ||||
|                 # This is wrong because if it's a referred call it won't track the dependency; we actually need to trace all dependencies that are not ell funcs to see if they call it as well. | ||||
|                 if is_function_called(var_name, source): | ||||
|                     uses.update(dep_uses) | ||||
|  | ||||
|         elif isinstance(var_value, types.ModuleType): | ||||
|             if should_import(var_value): | ||||
|                 imports += [dill.source.getimport(var_value, alias=var_name)] | ||||
|  | ||||
|             else: | ||||
|                 # Now we need to find all the variables in this module that were referenced | ||||
|                 modules.append((var_name, var_value)) | ||||
|         elif isinstance(var_value, types.BuiltinFunctionType): | ||||
|             # we need to get an import for it | ||||
|  | ||||
|             imports += [dill.source.getimport(var_value, alias=var_name)] | ||||
|  | ||||
|         else: | ||||
|             # FIXME: Doesn't work with binary values. But it does work with the actual current | ||||
|             # value of a fn, so if a global changes a bunch during execution we will have the | ||||
|             # repr value of the global at that time. Ideally everything is static, but this is | ||||
|             # indeed fucked :); that is, this is not a reserializable representation of the | ||||
|             # prompt and we cannot use this in a production library. | ||||
|             dependencies.append(f"{var_name} = {repr(var_value)}") | ||||
|  | ||||
|     # We probably need to resolve things with a topological sort and turn this into a DAG, but for now we can just do this. | ||||
|  | ||||
|     cur_src = ( | ||||
|         DELIM | ||||
|         + "\n" | ||||
|         + f"\n{DELIM}\n".join(imports + dependencies) | ||||
|         + "\n" | ||||
|         + DELIM | ||||
|         + "\n" | ||||
|         + source | ||||
|         + "\n" | ||||
|         + DELIM | ||||
|         + "\n" | ||||
|     ) | ||||
|     reverse_module_src = deque() | ||||
|     while len(modules) > 0: | ||||
|         mname, mval = modules.popleft() | ||||
|         mdeps = [] | ||||
|         attrs_to_extract = get_referenced_names(cur_src.replace(DELIM, ""), mname) | ||||
|         for attr in attrs_to_extract: | ||||
|             val = getattr(mval, attr) | ||||
|             if isinstance(val, (types.FunctionType, type, types.MethodType)): | ||||
|                 dep, _,  dep_uses = lexical_closure( | ||||
|                     val, already_closed=already_closed | ||||
|                 ) | ||||
|                 mdeps.append(dep) | ||||
|                 uses.update(dep_uses) | ||||
|             elif isinstance(val, types.ModuleType): | ||||
|                 modules.append((attr, val)) | ||||
|             else: | ||||
|                 # If its another module we need to add it to the list of modules | ||||
|                 mdeps.append(f"{attr} = {repr(val)}") | ||||
|  | ||||
|         mdeps.insert(0, f"# Extracted from module: {mname}") | ||||
|  | ||||
|         # Now let's dereference all the module names in our cur_src | ||||
|         for attr in attrs_to_extract: | ||||
|             # Go through the dependencies and replace the module-qualified names with the bare attr | ||||
|             source = source.replace(f"{mname}.{attr}", attr) | ||||
|             dependencies = [ | ||||
|                 dep.replace(f"{mname}.{attr}", attr) for dep in dependencies | ||||
|             ] | ||||
|         # Now add all the module dependencies to the top of the list | ||||
|         reverse_module_src.appendleft("\n".join(mdeps)) | ||||
|  | ||||
|     # Now we need to add the module dependencies to the top of the source | ||||
|     # Sort the dependencies | ||||
|     dependencies = sorted(dependencies) | ||||
|     imports = sorted(imports) | ||||
|     reverse_module_src = sorted(reverse_module_src) | ||||
|     separated_dependencies = ( | ||||
|         imports | ||||
|         + list(reverse_module_src) | ||||
|         + dependencies | ||||
|         + [source] | ||||
|     ) | ||||
|     # Remove duplicates and preserve order | ||||
|     separated_dependencies = list(dict.fromkeys(separated_dependencies)) | ||||
|  | ||||
|     dirty_src = DELIM + "\n" + f"\n{DELIM}\n".join(separated_dependencies) + "\n" + DELIM + "\n" | ||||
|     dirty_src_without_func = DELIM + "\n" + f"\n{DELIM}\n".join(separated_dependencies[:-1]) + "\n" + DELIM + "\n" | ||||
|  | ||||
|     CLOSURE_SOURCE[hash(func)] = dirty_src  | ||||
|  | ||||
|     dsrc = _clean_src(dirty_src_without_func) | ||||
|     fn_hash = hashlib.md5( | ||||
|             "\n".join((source, dsrc, func.__qualname__)).encode() | ||||
|         ).hexdigest() | ||||
|      | ||||
|     if is_ell_func: | ||||
|         outer_ell_func.__ell_closure__ = (source, dsrc) | ||||
|         outer_ell_func.__ell_hash__ = fn_hash | ||||
|         outer_ell_func.__ell_uses__ = uses | ||||
|  | ||||
|     return (dirty_src, (source, dsrc), ({fn_hash} if is_ell_func else uses)) | ||||
|  | ||||
| def lexically_closured_source(func): | ||||
|     _, fnclosure, uses = lexical_closure(func) | ||||
|     return fnclosure, uses | ||||
|  | ||||
|  | ||||
| def _clean_src(dirty_src): | ||||
|     # Split into delimited sections, dropping empty ones | ||||
|     split_by_section = filter(lambda x: len(x.strip()) > 0, dirty_src.split(DELIM)) | ||||
|  | ||||
|     # Remove duplicates while preserving order | ||||
|     split_by_section = list(dict.fromkeys(split_by_section)) | ||||
|  | ||||
|     # Concatenate everything back together, hoisting imports to the top | ||||
|     all_imports = [] | ||||
|     final_src = "\n".join(split_by_section) | ||||
|     out_final_src = final_src[:] | ||||
|     for line in final_src.split("\n"): | ||||
|         if line.startswith("import") or line.startswith("from"): | ||||
|             all_imports.append(line) | ||||
|             out_final_src = out_final_src.replace(line, "") | ||||
|  | ||||
|     all_imports = "\n".join(sorted(all_imports)) | ||||
|     final_src = all_imports + "\n" + out_final_src | ||||
|  | ||||
|     # now replace all "\n\n\n" or longer with "\n\n" | ||||
|     final_src = re.sub(r"\n{3,}", "\n\n", final_src) | ||||
|  | ||||
|     return final_src | ||||
|  | ||||
|  | ||||
| def is_function_called(func_name, source_code): | ||||
|     """ | ||||
|     Check if a function is called in the given source code. | ||||
|  | ||||
|     Parameters: | ||||
|     func_name (str): The name of the function to check. | ||||
|     source_code (str): The source code to check. | ||||
|  | ||||
|     Returns: | ||||
|     bool: True if the function is called, False otherwise. | ||||
|     """ | ||||
|     # Parse the source code into an AST | ||||
|     tree = ast.parse(source_code) | ||||
|  | ||||
|     # Walk through all the nodes in the AST | ||||
|     for node in ast.walk(tree): | ||||
|         # If the node is a function call | ||||
|         if isinstance(node, ast.Call): | ||||
|             # If the function being called is the function we're looking for | ||||
|             if isinstance(node.func, ast.Name) and node.func.id == func_name: | ||||
|                 return True | ||||
|  | ||||
|     # If we've gone through all the nodes and haven't found a call to the function, it's not called | ||||
|     return False | ||||
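A sketch of what extraction looks like on the docstring's example, using the public helper above (the output shown in comments is approximate; exact formatting depends on _clean_src):

    from ell.util.closure import lexically_closured_source

    X = 7

    def helper():
        return X + 1

    def xD():
        return helper()

    (fn_source, closure_source), uses = lexically_closured_source(xD)
    print(closure_source)  # roughly: "X = 7" plus helper's source, deduplicated, imports hoisted
    print(fn_source)       # the source of xD itself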
ell/src/ell/util/dict_sync_meta.py (38 lines, new file)
							| @@ -0,0 +1,38 @@ | ||||
| from dataclasses import dataclass, fields | ||||
|  | ||||
|  | ||||
| class DictSyncMeta(type): | ||||
|     def __new__(cls, name, bases, attrs): | ||||
|         new_class = super().__new__(cls, name, bases, attrs) | ||||
|         original_init = getattr(new_class, "__init__") | ||||
|  | ||||
|         def new_init(self, *args, **kwargs): | ||||
|             # Set the fields first before the original __init__ is called | ||||
|             for name, field in attrs.get("__annotations__", {}).items(): | ||||
|                 if name in kwargs: | ||||
|                     setattr(self, name, kwargs[name]) | ||||
|                 else: | ||||
|                     # Set default values for fields not provided in kwargs | ||||
|                     default = field.default if hasattr(field, "default") else None | ||||
|                     setattr(self, name, default) | ||||
|  | ||||
|             # Call the original __init__ | ||||
|             original_init(self, *args, **kwargs) | ||||
|  | ||||
|             # Now sync the fields to the dictionary | ||||
|             for field in fields(self): | ||||
|                 self[field.name] = getattr(self, field.name) | ||||
|  | ||||
|         setattr(new_class, "__init__", new_init) | ||||
|  | ||||
|         original_setattr = getattr(new_class, "__setattr__") | ||||
|  | ||||
|         def new_setattr(self, key, value): | ||||
|             original_setattr(self, key, value) | ||||
|             if ( | ||||
|                 key in self.__annotations__ | ||||
|             ):  # Only sync if it's a defined dataclass field | ||||
|                 self[key] = value | ||||
|  | ||||
|         setattr(new_class, "__setattr__", new_setattr) | ||||
|         return new_class | ||||
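Minimal usage sketch, mirroring how the Message type in ell.types uses this metaclass:

    from dataclasses import dataclass

    from ell.util.dict_sync_meta import DictSyncMeta

    @dataclass
    class Point(dict, metaclass=DictSyncMeta):
        x: int
        y: int

    p = Point(x=1, y=2)
    assert p["x"] == 1   # fields are mirrored into the dict on init
    p.y = 5
    assert p["y"] == 5   # attribute writes stay in sync
    p["x"] = 9           # direct dict writes are one-way: p.x is still 1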
ell/src/ell/util/verbosity.py (173 lines, new file)
							| @@ -0,0 +1,173 @@ | ||||
| import sys | ||||
| import hashlib | ||||
| import shutil | ||||
| import textwrap | ||||
| from typing import Dict, Tuple, List, Any, Optional | ||||
| from colorama import Fore, Style, init | ||||
| from ell.types import LMP, Message | ||||
| from ell.configurator import config | ||||
| import logging | ||||
| from functools import lru_cache | ||||
|  | ||||
| # Initialize colorama | ||||
| init(autoreset=True) | ||||
|  | ||||
| # Define colors and styles | ||||
| ELL_COLORS = {k: v for k, v in vars(Fore).items() if k not in ['RESET', 'BLACK', 'LIGHTBLACK_EX']} | ||||
| BOLD = Style.BRIGHT | ||||
| UNDERLINE = '\033[4m' | ||||
| RESET = Style.RESET_ALL | ||||
| SYSTEM_COLOR = Fore.CYAN | ||||
| USER_COLOR = Fore.GREEN | ||||
| ASSISTANT_COLOR = Fore.YELLOW | ||||
| PIPE_COLOR = Fore.BLUE | ||||
|  | ||||
| # Set up logging | ||||
| # logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') | ||||
| logger = logging.getLogger(__name__) | ||||
|  | ||||
| @lru_cache(maxsize=128) | ||||
| def compute_color(invoking_lmp: LMP) -> str: | ||||
|     """Compute and cache a consistent color for a given LMP.""" | ||||
|     name_hash = hashlib.md5(invoking_lmp.__name__.encode()).hexdigest() | ||||
|     color_index = int(name_hash, 16) % len(ELL_COLORS) | ||||
|     return list(ELL_COLORS.values())[color_index] | ||||
|  | ||||
| def format_arg(arg: Any, max_length: int = 8) -> str: | ||||
|     """Format an argument for display with customizable max length.""" | ||||
|     str_arg = str(arg) | ||||
|     return f"{Fore.MAGENTA}{str_arg[:max_length]}..{Style.RESET_ALL}" if len(str_arg) > max_length else f"{Fore.MAGENTA}{str_arg}{Style.RESET_ALL}" | ||||
|  | ||||
| def format_kwarg(key: str, value: Any, max_length: int = 8) -> str: | ||||
|     """Format a keyword argument for display with customizable max length.""" | ||||
|     return f"{Style.DIM}{key}{Style.RESET_ALL}={Fore.MAGENTA}{str(value)[:max_length]}..{Style.RESET_ALL}" | ||||
|  | ||||
| def get_terminal_width() -> int: | ||||
|     """Get the terminal width, defaulting to 80 if it can't be determined.""" | ||||
|     try: | ||||
|         return shutil.get_terminal_size((80, 20)).columns | ||||
|     except Exception: | ||||
|         logger.warning("Unable to determine terminal size. Defaulting to 80 columns.") | ||||
|         return 80 | ||||
|  | ||||
| def wrap_text_with_prefix(text: str, width: int, prefix: str, subsequent_prefix: str, text_color: str) -> List[str]: | ||||
|     """Wrap text while preserving the prefix and color for each line.""" | ||||
|     paragraphs = text.split('\n') | ||||
|     wrapped_paragraphs = [textwrap.wrap(p, width=width - len(prefix)) for p in paragraphs] | ||||
|     wrapped_lines = [line for paragraph in wrapped_paragraphs for line in paragraph] | ||||
|     result = [f"{prefix}{RESET}{wrapped_lines[0]}{RESET}" if wrapped_lines else f"{prefix}{text_color}{RESET}"] | ||||
|     result.extend([f"{subsequent_prefix}{RESET}{line}{RESET}" for line in wrapped_lines[1:]]) | ||||
|     return result | ||||
|  | ||||
| def print_wrapped_messages(messages: List[Message], max_role_length: int, color: str, wrap_width: Optional[int] = None): | ||||
|     """Print wrapped messages with proper indentation, customizable wrap width, and consistent ASCII piping.""" | ||||
|     terminal_width = get_terminal_width() | ||||
|     prefix = f"{PIPE_COLOR}│   " | ||||
|     role_prefix = ' ' * (max_role_length + 2) | ||||
|     subsequent_prefix = f"{PIPE_COLOR}│   {role_prefix}" | ||||
|     wrapping_width = wrap_width or (terminal_width - len(prefix)) | ||||
|  | ||||
|     for i, message in enumerate(messages): | ||||
|         role = message["role"] | ||||
|         text = message["content"] | ||||
|         role_color = SYSTEM_COLOR if role == "system" else USER_COLOR if role == "user" else ASSISTANT_COLOR | ||||
|          | ||||
|         role_line = f"{prefix}{role_color}{role.rjust(max_role_length)}: {RESET}" | ||||
|         wrapped_lines = wrap_text_with_prefix(text, wrapping_width - len(role_prefix), '', subsequent_prefix, role_color) | ||||
|          | ||||
|         print(f"{role_line}{wrapped_lines[0]}") | ||||
|         for line in wrapped_lines[1:]: | ||||
|             print(line) | ||||
|          | ||||
|         if i < len(messages) - 1: | ||||
|             print(f"{PIPE_COLOR}│{RESET}") | ||||
|  | ||||
| def model_usage_logger_pre( | ||||
|     invoking_lmp: LMP, | ||||
|     lmp_args: Tuple, | ||||
|     lmp_kwargs: Dict, | ||||
|     lmp_hash: str, | ||||
|     messages: List[Message], | ||||
|     color: str = "", | ||||
|     arg_max_length: int = 8 | ||||
| ): | ||||
|     """Log model usage before execution with customizable argument display length and ASCII box.""" | ||||
|     color = color or compute_color(invoking_lmp) | ||||
|     formatted_args = [format_arg(arg, arg_max_length) for arg in lmp_args] | ||||
|     formatted_kwargs = [format_kwarg(key, lmp_kwargs[key], arg_max_length) for key in lmp_kwargs] | ||||
|     formatted_params = ', '.join(formatted_args + formatted_kwargs) | ||||
|      | ||||
|     terminal_width = get_terminal_width() | ||||
|      | ||||
|     logger.info(f"Invoking LMP: {invoking_lmp.__name__} (hash: {lmp_hash[:8]})") | ||||
|      | ||||
|     print(f"{PIPE_COLOR}╔{'═' * (terminal_width - 2)}╗{RESET}") | ||||
|     print(f"{PIPE_COLOR}║ {color}{BOLD}{UNDERLINE}{invoking_lmp.__name__}{RESET}{color}({formatted_params}) # ({lmp_hash[:8]}...){RESET}") | ||||
|     print(f"{PIPE_COLOR}╠{'═' * (terminal_width - 2)}╣{RESET}") | ||||
|     print(f"{PIPE_COLOR}║ {BOLD}Prompt:{RESET}") | ||||
|     print(f"{PIPE_COLOR}╟{'─' * (terminal_width - 2)}╢{RESET}") | ||||
|  | ||||
|     max_role_length = max(len("assistant"), max(len(message["role"]) for message in messages)) | ||||
|     print_wrapped_messages(messages, max_role_length, color) | ||||
|  | ||||
| def model_usage_logger_post_start(color: str = "", n: int = 1): | ||||
|     """Log the start of model output with ASCII box.""" | ||||
|     terminal_width = get_terminal_width() | ||||
|     print(f"{PIPE_COLOR}╟{'─' * (terminal_width - 2)}╢{RESET}") | ||||
|     print(f"{PIPE_COLOR}║ {BOLD}Output{f'[0 of {n}]' if n > 1 else ''}:{RESET}") | ||||
|     print(f"{PIPE_COLOR}╟{'─' * (terminal_width - 2)}╢{RESET}") | ||||
|     print(f"{PIPE_COLOR}│   {ASSISTANT_COLOR}assistant: {RESET}", end='') | ||||
|     sys.stdout.flush() | ||||
|  | ||||
| from contextlib import contextmanager | ||||
|  | ||||
| @contextmanager | ||||
| def model_usage_logger_post_intermediate(color: str = "", n: int = 1): | ||||
|     """Context manager to log intermediate model output without wrapping, only indenting if necessary.""" | ||||
|     terminal_width = get_terminal_width() | ||||
|     prefix = f"{PIPE_COLOR}│   " | ||||
|     subsequent_prefix = f"{PIPE_COLOR}│   {' ' * (len('assistant: '))}" | ||||
|     chars_printed = len(subsequent_prefix) | ||||
|  | ||||
|     def log_stream_chunk(stream_chunk: str): | ||||
|         nonlocal chars_printed | ||||
|         if stream_chunk: | ||||
|             lines = stream_chunk.split('\n') | ||||
|             for i, line in enumerate(lines): | ||||
|                 if chars_printed + len(line) > terminal_width - 6: | ||||
|                     print() | ||||
|                     if i == 0: | ||||
|                         print(subsequent_prefix, end='') | ||||
|                         chars_printed = len(prefix) | ||||
|                     else: | ||||
|                         print(subsequent_prefix, end='') | ||||
|                         chars_printed = len(subsequent_prefix) | ||||
|                     print(line.lstrip(), end='') | ||||
|                 else: | ||||
|                     print(line, end='') | ||||
|                 chars_printed += len(line) | ||||
|  | ||||
|                 if i < len(lines) - 1: | ||||
|                     print() | ||||
|                     print(subsequent_prefix, end='') | ||||
|                     chars_printed = len(subsequent_prefix)  # Reset for new line | ||||
|             sys.stdout.flush() | ||||
|  | ||||
|     try: | ||||
|         yield log_stream_chunk | ||||
|     finally: | ||||
|         pass | ||||
|  | ||||
|  | ||||
| def model_usage_logger_post_end(): | ||||
|     """Log the end of model output with ASCII box closure.""" | ||||
|     terminal_width = get_terminal_width() | ||||
|     print(f"\n{PIPE_COLOR}╚{'═' * (terminal_width - 2)}╝{RESET}") | ||||
|  | ||||
| def set_log_level(level: str): | ||||
|     """Set the logging level.""" | ||||
|     numeric_level = getattr(logging, level.upper(), None) | ||||
|     if not isinstance(numeric_level, int): | ||||
|         raise ValueError(f'Invalid log level: {level}') | ||||
|     logger.setLevel(numeric_level) | ||||
|  | ||||
| # Example usage | ||||
| # set_log_level('DEBUG') | ||||
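A rendering sketch using plain role/content dicts (Message works too, since it subclasses dict); this just drives the boxed output by hand with a simulated stream:

    from ell.util.verbosity import (
        model_usage_logger_post_end,
        model_usage_logger_post_intermediate,
        model_usage_logger_post_start,
        print_wrapped_messages,
    )

    msgs = [{"role": "system", "content": "Be brief."},
            {"role": "user", "content": "Say hi."}]
    print_wrapped_messages(msgs, max_role_length=len("assistant"), color="")

    model_usage_logger_post_start()
    with model_usage_logger_post_intermediate() as log_chunk:
        for chunk in ["Hel", "lo!"]:  # simulated stream chunks
            log_chunk(chunk)
    model_usage_logger_post_end()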
ell/tests/test_dict_sync_meta.py (40 lines, new file)
							| @@ -0,0 +1,40 @@ | ||||
| import pytest | ||||
| from ell.types import Message | ||||
|  | ||||
|  | ||||
| @pytest.fixture | ||||
| def message(): | ||||
|     return Message(role="user", content="Initial content") | ||||
|  | ||||
|  | ||||
| def test_initialization(): | ||||
|     """Test that the dictionary is correctly initialized with dataclass fields.""" | ||||
|     msg = Message(role="admin", content="Hello, world!") | ||||
|     assert msg["role"] == "admin" | ||||
|     assert msg["content"] == "Hello, world!" | ||||
|     assert msg.role == "admin" | ||||
|     assert msg.content == "Hello, world!" | ||||
|  | ||||
|  | ||||
| def test_attribute_modification(message): | ||||
|     """Test that modifications to attributes update the dictionary.""" | ||||
|     # Modify the attributes | ||||
|     message.role = "moderator" | ||||
|     message.content = "Updated content" | ||||
|     # Check dictionary synchronization | ||||
|     assert message["role"] == "moderator" | ||||
|     assert message["content"] == "Updated content" | ||||
|     assert message.role == "moderator" | ||||
|     assert message.content == "Updated content" | ||||
|  | ||||
|  | ||||
| def test_dictionary_modification(message): | ||||
|     """Test that direct dictionary modifications do not break attribute access.""" | ||||
|     # Directly modify the dictionary | ||||
|     message["role"] = "admin" | ||||
|     message["content"] = "New content" | ||||
|     # Check if the attributes are not affected (they should not be, as this is one-way sync) | ||||
|     assert message.role == "user" | ||||
|     assert message.content == "Initial content" | ||||
|     assert message["role"] == "admin" | ||||
|     assert message["content"] == "New content" | ||||
ell/tests/test_lmp_to_prompt.py (97 lines, new file)
							| @@ -0,0 +1,97 @@ | ||||
| """ | ||||
| Pytest for the LM function (mocks the OpenAI API so we can pretend to generate completions through the typical approach taken in the decorators and adapters files). | ||||
| """ | ||||
|  | ||||
| import pytest | ||||
| from unittest.mock import patch, MagicMock | ||||
| from ell.decorators import DEFAULT_SYSTEM_PROMPT, lm | ||||
| from ell.types import Message, LMPParams | ||||
|  | ||||
|  | ||||
| @lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5) | ||||
| def lmp_with_default_system_prompt(*args, **kwargs): | ||||
|     return "Test user prompt" | ||||
|  | ||||
|  | ||||
| @lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5) | ||||
| def lmp_with_docstring_system_prompt(*args, **kwargs): | ||||
|     """Test system prompt"""  # I personally prefer this syntax, but it's not formattable, so I'm not sure it's the best approach. We can leave it in as a legacy feature, but the default docs should use ell.system, ell.user, ... | ||||
|  | ||||
|     return "Test user prompt" | ||||
|  | ||||
|  | ||||
| @lm(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5) | ||||
| def lmp_with_message_fmt(*args, **kwargs): | ||||
|     """Just a normal doc string""" | ||||
|  | ||||
|     return [ | ||||
|         Message(role="system", content="Test system prompt from message fmt"), | ||||
|         Message(role="user", content="Test user prompt 3"), | ||||
|     ] | ||||
|  | ||||
|  | ||||
| @pytest.fixture | ||||
| def client_mock(): | ||||
|     with patch("ell.adapter.client.chat.completions.create") as mock: | ||||
|         yield mock | ||||
|  | ||||
|  | ||||
| def test_lm_decorator_with_params(client_mock): | ||||
|     client_mock.return_value = MagicMock( | ||||
|         choices=[MagicMock(message=MagicMock(content="Mocked content"))] | ||||
|     ) | ||||
|     result = lmp_with_default_system_prompt("input", lm_params=dict(temperature=0.5)) | ||||
|     # Inspect the call made to the mocked client | ||||
|     print("client_mock was called with:", client_mock.call_args) | ||||
|     client_mock.assert_called_with( | ||||
|         model="gpt-4-turbo", | ||||
|         messages=[ | ||||
|             Message(role="system", content=DEFAULT_SYSTEM_PROMPT), | ||||
|             Message(role="user", content="Test user prompt"), | ||||
|         ], | ||||
|         temperature=0.5, | ||||
|         max_tokens=5, | ||||
|     ) | ||||
|     assert isinstance(result, str) | ||||
|     assert result == "Mocked content" | ||||
|  | ||||
|  | ||||
| def test_lm_decorator_with_docstring_system_prompt(client_mock): | ||||
|     client_mock.return_value = MagicMock( | ||||
|         choices=[MagicMock(message=MagicMock(content="Mocked content"))] | ||||
|     ) | ||||
|     result = lmp_with_docstring_system_prompt("input", lm_params=dict(temperature=0.5)) | ||||
|     print("client_mock was called with:", client_mock.call_args) | ||||
|     client_mock.assert_called_with( | ||||
|         model="gpt-4-turbo", | ||||
|         messages=[ | ||||
|             Message(role="system", content="Test system prompt"), | ||||
|             Message(role="user", content="Test user prompt"), | ||||
|         ], | ||||
|         temperature=0.5, | ||||
|         max_tokens=5, | ||||
|     ) | ||||
|     assert isinstance(result, str) | ||||
|     assert result == "Mocked content" | ||||
|  | ||||
|  | ||||
| def test_lm_decorator_with_msg_fmt_system_prompt(client_mock): | ||||
|     client_mock.return_value = MagicMock( | ||||
|         choices=[ | ||||
|             MagicMock(message=MagicMock(content="Mocked content from msg fmt")) | ||||
|         ] | ||||
|     ) | ||||
|     result = lmp_with_message_fmt("input", lm_params=dict(temperature=0.5)) | ||||
|     print("client_mock was called with:", client_mock.call_args) | ||||
|     client_mock.assert_called_with( | ||||
|         model="gpt-4-turbo", | ||||
|         messages=[ | ||||
|             Message(role="system", content="Test system prompt from message fmt"), | ||||
|             Message(role="user", content="Test user prompt 3"), | ||||
|         ], | ||||
|         temperature=0.5, | ||||
|         max_tokens=5, | ||||
|     ) | ||||
|     assert isinstance(result, str) | ||||
|     assert result == "Mocked content from msg fmt" | ||||
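The MagicMock return values above simply mirror the attribute shape of an OpenAI chat completion, which is why the decorated functions can read the mocked reply without any real API call; a minimal illustration:

    from unittest.mock import MagicMock

    # Same nesting as the real response: response.choices[0].message.content
    response = MagicMock(choices=[MagicMock(message=MagicMock(content="Mocked content"))])
    assert response.choices[0].message.content == "Mocked content"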
107  ell/tests/test_lstr.py  Normal file
							| @@ -0,0 +1,107 @@ | ||||
| import numpy as np | ||||
| import pytest | ||||
| from ell.lstr import lstr | ||||
|  | ||||
|  | ||||
| class TestLstr: | ||||
|     def test_init(self): | ||||
|         # Test initialization with string content only | ||||
|         s = lstr("hello") | ||||
|         assert str(s) == "hello" | ||||
|         assert s.logits is None | ||||
|         assert s.originator == frozenset() | ||||
|  | ||||
|         # Test initialization with logits and originator | ||||
|         logits = np.array([0.1, 0.2]) | ||||
|         originator = "model1" | ||||
|         s = lstr("world", logits=logits, originator=originator) | ||||
|         assert str(s) == "world" | ||||
|         assert np.array_equal(s.logits, logits) | ||||
|         assert s.originator == frozenset({originator}) | ||||
|  | ||||
|     def test_add(self): | ||||
|         s1 = lstr("hello") | ||||
|         s2 = lstr("world", originator="model2") | ||||
|         result = s1 + s2 | ||||
|         assert isinstance(result, str) | ||||
|         assert str(result) == "helloworld" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset({"model2"}) | ||||
|  | ||||
|     def test_mod(self): | ||||
|         s = lstr("hello %s") | ||||
|         result = s % "world" | ||||
|         assert str(result) == "hello world" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset() | ||||
|  | ||||
|     def test_mul(self): | ||||
|         s = lstr("ha", originator="model3") | ||||
|         result = s * 3 | ||||
|         assert str(result) == "hahaha" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset({"model3"}) | ||||
|  | ||||
|     def test_getitem(self): | ||||
|         s = lstr( | ||||
|             "hello", logits=np.array([0.1, 0.2, 0.3, 0.4, 0.5]), originator="model4" | ||||
|         ) | ||||
|         result = s[1:4] | ||||
|         assert str(result) == "ell" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset({"model4"}) | ||||
|  | ||||
|     def test_upper(self): | ||||
|         # Test upper method without originator and logits | ||||
|         s = lstr("hello") | ||||
|         result = s.upper() | ||||
|         assert str(result) == "HELLO" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset() | ||||
|  | ||||
|         # Test upper method with originator | ||||
|         s = lstr("world", originator="model11") | ||||
|         result = s.upper() | ||||
|         assert str(result) == "WORLD" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset({"model11"}) | ||||
|  | ||||
|     def test_join(self): | ||||
|         s = lstr(", ", originator="model5") | ||||
|         parts = [lstr("hello"), lstr("world", originator="model6")] | ||||
|         result = s.join(parts) | ||||
|         assert str(result) == "hello, world" | ||||
|         assert result.logits is None | ||||
|         assert result.originator == frozenset({"model5", "model6"}) | ||||
|  | ||||
|     def test_split(self): | ||||
|         s = lstr("hello world", originator="model7") | ||||
|         parts = s.split() | ||||
|         assert [str(p) for p in parts] == ["hello", "world"] | ||||
|         assert all(p.logits is None for p in parts) | ||||
|         assert all(p.originator == frozenset({"model7"}) for p in parts) | ||||
|  | ||||
|     def test_partition(self): | ||||
|         s = lstr("hello, world", originator="model8") | ||||
|         part1, sep, part2 = s.partition(", ") | ||||
|         assert (str(part1), str(sep), str(part2)) == ("hello", ", ", "world") | ||||
|         assert all(p.logits is None for p in (part1, sep, part2)) | ||||
|         assert all(p.originator == frozenset({"model8"}) for p in (part1, sep, part2)) | ||||
|  | ||||
|     def test_formatting(self): | ||||
|         s = lstr("Hello {}!") | ||||
|         filled = s.format(lstr("world", originator="model9")) | ||||
|         print(filled) | ||||
|         assert str(filled) == "Hello world!" | ||||
|         assert filled.logits is None | ||||
|         assert filled.originator == frozenset({"model9"}) | ||||
|  | ||||
|     def test_repr(self): | ||||
|         s = lstr("test", logits=np.array([1.0]), originator="model10") | ||||
|         assert "test" in repr(s) | ||||
|         assert "model10" in repr(s.originator) | ||||
|  | ||||
|  | ||||
| # Run the tests | ||||
| if __name__ == "__main__": | ||||
|     pytest.main() | ||||
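The invariant these tests exercise is that string operations drop logits and union the originators of every operand. A simplified sketch of that bookkeeping (not ell's implementation, which also tracks logits):

    # A str subclass that carries a frozenset of originators through "+".
    class TracedStr(str):
        def __new__(cls, value, originator=frozenset()):
            obj = super().__new__(cls, value)
            obj.originator = (frozenset({originator})
                              if isinstance(originator, str) else frozenset(originator))
            return obj

        def __add__(self, other):
            combined = TracedStr(str(self) + str(other))
            combined.originator = self.originator | getattr(other, "originator", frozenset())
            return combined

    s = TracedStr("hello", originator="model1") + TracedStr(" world", originator="model2")
    assert s.originator == frozenset({"model1", "model2"})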
1  examples/.gitignore  vendored  Normal file
							| @@ -0,0 +1 @@ | ||||
| *.mid | ||||
BIN  examples/chord_progression.mid  Normal file  (binary file not shown)
70  examples/chord_progression_writer.py  Normal file
							| @@ -0,0 +1,70 @@ | ||||
| from typing import List, Optional | ||||
| import ell | ||||
| from midiutil import MIDIFile | ||||
| import pygame | ||||
| import time | ||||
|  | ||||
| ell.config.verbose = True | ||||
|  | ||||
| CHORD_FORMAT = "| Chord | Chord | ... |" | ||||
|  | ||||
| def get_chord_progression_prompt(genre: Optional[str], key: Optional[str]) -> str: | ||||
|     return ell.user(f"Write a chord progression for a song {'in ' + genre if genre else ''} {'in the key of ' + key if key else ''}.") | ||||
|  | ||||
| @ell.lm(model="gpt-4o", temperature=0.5) | ||||
| def write_a_chord_progression_for_song(genre: Optional[str], key: Optional[str]) -> str: | ||||
|     return [ | ||||
|         ell.system(f"You are a world class music theorist and composer. Your goal is to write chord progressions to songs given parameters. They should be fully featured and compositionally sound. Feel free to use advanced chords of your choosing. Only answer with the chord progression in {CHORD_FORMAT} format. Do not provide any additional text."), | ||||
|         get_chord_progression_prompt(genre, key) | ||||
|     ] | ||||
|  | ||||
|  | ||||
|  | ||||
| @ell.lm(model="gpt-4o", temperature=0.0) | ||||
| def parse_chords_to_midi(chords: List[str]) -> str: | ||||
|     """You are MusicGPT. You are extremely skilled at all music related tasks.""" | ||||
|  | ||||
|     chord_list = "\n".join(chord.strip() for chord in chords)  # join outside the f-string: backslashes are not allowed in f-string expressions before Python 3.12 | ||||
|     return f"Convert the following chord symbols to their composite MIDI note representation. Only answer with the MIDI note representation per chord in the format 'Note,Note,Note', separating chords by newlines.\n{chord_list}" | ||||
|  | ||||
|  | ||||
| def create_midi_file(parsed_chords, output_file="chord_progression.mid"): | ||||
|     midi = MIDIFile(1) | ||||
|     track = 0 | ||||
|     beat = 0  # named 'beat' to avoid shadowing the imported time module | ||||
|     midi.addTrackName(track, beat, "Chord Progression") | ||||
|     midi.addTempo(track, beat, 120) | ||||
|  | ||||
|     for chord in parsed_chords: | ||||
|         notes = [int(note) for note in chord.split(',')] | ||||
|         for note in notes: | ||||
|             midi.addNote(track, 0, note, beat, 1, 100) | ||||
|         beat += 1 | ||||
|  | ||||
|     with open(output_file, "wb") as f: | ||||
|         midi.writeFile(f) | ||||
|  | ||||
| def play_midi_file(file_path): | ||||
|     pygame.mixer.init() | ||||
|     pygame.mixer.music.load(file_path) | ||||
|     pygame.mixer.music.play() | ||||
|     while pygame.mixer.music.get_busy(): | ||||
|         time.sleep(1) | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     progression = write_a_chord_progression_for_song(genre="rnb", key="C") | ||||
|     chords = [chord for chord in progression.split("|") if chord.strip()] | ||||
|     parsed_chords = parse_chords_to_midi(chords).split('\n') | ||||
|  | ||||
|     midi_file = "chord_progression.mid" | ||||
|     create_midi_file(parsed_chords, midi_file) | ||||
|     print(f"MIDI file created: {midi_file}") | ||||
|  | ||||
|     print("Playing chord progression...") | ||||
|     play_midi_file(midi_file) | ||||
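To exercise the MIDI helpers without calling the model at all, a hand-written progression works; the note numbers below are just C, F, and G major triads:

    # Each string is one chord as comma-separated MIDI note numbers.
    create_midi_file(["60,64,67", "65,69,72", "67,71,74"], "test_progression.mid")
    play_midi_file("test_progression.mid")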
20  examples/client_example.py  Normal file
							| @@ -0,0 +1,20 @@ | ||||
| import ell | ||||
| import os | ||||
| import openai | ||||
|  | ||||
|  | ||||
| ell.config.verbose = True | ||||
|  | ||||
| client = openai.Client(api_key=open(os.path.expanduser("~/.oaikey")).read().strip()) | ||||
|  | ||||
| @ell.lm(model="gpt-4o", temperature=0.1, n=1) | ||||
| def number_to_words(number: int): | ||||
|     """You are an expert in the English language and convert any number to its word representation, for example 123456 would be one hundred and twenty three thousand four hundred fifty six. | ||||
| You must always return the word representation and nothing else.""" | ||||
|     return f"Convert {number} to its word representation." | ||||
|  | ||||
| # with ell.config.model_registry_override({"gpt-3.5-turbo": client}): | ||||
|  | ||||
| print(number_to_words(123456, client=client)) | ||||
28  examples/hello_world.py  Normal file
							| @@ -0,0 +1,28 @@ | ||||
| import ell | ||||
|  | ||||
| ell.config.verbose = True | ||||
|  | ||||
| @ell.lm(model="gpt-3.5-turbo", temperature=1.0, max_tokens=30) | ||||
| def write_a_premise_for_a_story(about: str): | ||||
|     """You're an extremely adept storyteller. Your goal is to write a short premise for a story about the given topic. The premise should be no longer than 20 words.""" | ||||
|     return f"Write a short premise for a story about {about}." | ||||
|  | ||||
|  | ||||
| @ell.lm(model="gpt-4-turbo", temperature=0.1, stop="REASON") | ||||
| def choose_best_story(stories: list[str]): | ||||
|     """You're an extremely adept storyteller. Your goal is to choose the best story from the given list of stories. Only output the best story. Format your answer as '<story>\nREASON\n<reason why it's the best>.'""" | ||||
|     return "STORIES\n" + "\n".join(stories) | ||||
|  | ||||
|  | ||||
| @ell.lm(model="gpt-4-turbo", temperature=0.1) | ||||
| def write_a_story(about: str): | ||||
|     """You're an extremely adept storyteller. Your goal is to write a short story based on the given premise. The story should be no longer than 300 words.""" | ||||
|     premises = write_a_premise_for_a_story(about, lm_params=dict(n=5))  # n=5 yields several candidate premises | ||||
|     best_premise = choose_best_story(premises).strip() | ||||
|  | ||||
|     return f"Write a short story based on the premise: {best_premise}." | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     print(write_a_story("a dog")) | ||||
21  examples/lexical_closure_example.py  Normal file
							| @@ -0,0 +1,21 @@ | ||||
| from ell.util.closure import lexically_closured_source | ||||
|  | ||||
| class Test: | ||||
|     pass | ||||
|  | ||||
| X = 6 | ||||
|  | ||||
|  | ||||
| def another_function(): | ||||
|     return X | ||||
|  | ||||
| def test(): | ||||
|     return another_function() | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     (src, dependencies), etc = lexically_closured_source(test) | ||||
|     print("Source:") | ||||
|     print(src) | ||||
|     print("Dependencies:") | ||||
|     print(dependencies) | ||||
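For intuition only, a naive sketch of what "lexical closure" means here, pairing a function's source with the module-level names it references; ell.util.closure's real implementation is more thorough:

    import builtins
    import inspect

    def naive_closure(fn):
        """Return fn's source plus the module-level names it references."""
        deps = [name for name in fn.__code__.co_names
                if name in fn.__globals__ and not hasattr(builtins, name)]
        return inspect.getsource(fn), deps

    src, deps = naive_closure(test)   # deps == ['another_function']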
33  examples/serialization_example.py  Normal file
							| @@ -0,0 +1,33 @@ | ||||
| """ | ||||
| An example of how to use the serializer to save and load invocations from the model. | ||||
| """ | ||||
|  | ||||
| import ell | ||||
| from ell.serializer import Serializer | ||||
| from ell.serializers.filesystem import FilesystemSerializer | ||||
|  | ||||
|  | ||||
| @ell.lm(model="gpt-4-turbo", temperature=0.1, max_tokens=5) | ||||
| def some_lmp(*args, **kwargs): | ||||
|     """Just a normal doc string""" | ||||
|     return [ | ||||
|         ell.system("Test system prompt from message fmt"), | ||||
|         ell.user("Test user prompt 3"), | ||||
|     ] | ||||
|  | ||||
|  | ||||
| @ell.lm(model="gpt-4-turbo", temperature=0.1, max_tokens=5) | ||||
| def some_lmp_2(output_from_some_lmp_1): | ||||
|     """Just a normal doc string""" | ||||
|     return [ | ||||
|         ell.system("Test system prompt from message fmt"), | ||||
|         ell.user("Test user prompt 3"), | ||||
|     ] | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     serializer = FilesystemSerializer("./filesystem_serializer_example") | ||||
|     serializer.install()  # Any invocation hereafter will be saved. | ||||
|      | ||||
|     # Example usage | ||||
|     result = some_lmp() | ||||
|     print(result) | ||||