Compare commits

..

30 Commits

Author SHA1 Message Date
Kyle Corbitt
213a00a8e6 Fix typescript hints for max_tokens 2023-07-21 12:04:58 -07:00
Kyle Corbitt
af9943eefc Merge pull request #77 from OpenPipe/provider-types
Slightly better typings for ModelProviders
2023-07-21 11:51:25 -07:00
Kyle Corbitt
741128e0f4 Better division of labor between frontend and backend model providers
A bit better thinking on which types go where.
2023-07-21 11:49:35 -07:00
David Corbitt
aff14539d8 Add comment to .env.example 2023-07-21 11:29:21 -07:00
David Corbitt
1af81a50a9 Add REPLICATE_API_TOKEN to .env.example 2023-07-21 11:28:14 -07:00
Kyle Corbitt
7e1fbb3767 Slightly better typings for ModelProviders
Still not great because the `any`s loosen some call sites up more than I'd like, but better than the broken types before.
2023-07-21 06:50:05 -07:00
David Corbitt
a5d972005e Add user's current prompt to prompt derivation 2023-07-21 00:43:39 -07:00
arcticfly
a180b5bef2 Show prompt diff when changing models (#76)
* Make CompareFunctions more configurable

* Change RefinePromptModal styles

* Accept newModel in getModifiedPromptFn

* Show prompt comparison in SelectModelModal

* Pass variant to SelectModelModal

* Update instructions

* Properly use isDisabled
2023-07-20 23:26:49 -07:00
Kyle Corbitt
55c697223e Merge pull request #74 from OpenPipe/model-providers
replicate/llama2 provider
2023-07-20 23:21:42 -07:00
arcticfly
9978075867 Fix auth flicker (#75)
* Remove experiments flicker for unauthenticated users

* Decrease size of NewScenarioButton spinner
2023-07-20 20:46:31 -07:00
Kyle Corbitt
847753c32b replicate/llama2 provider
Still need to fix the types but it runs
2023-07-20 19:55:03 -07:00
Kyle Corbitt
372c2512c9 Merge pull request #73 from OpenPipe/model-providers
More work on modelProviders
2023-07-20 18:56:14 -07:00
Kyle Corbitt
332a2101c0 More work on modelProviders
I think everything that's OpenAI-specific is inside modelProviders at this point, so we can get started adding more providers.
2023-07-20 18:54:26 -07:00
arcticfly
1822fe198e Initially render AutoResizeTextArea without overflow (#72)
* Rerender resized text area with scroll

* Remove default hidden overflow
2023-07-20 15:00:09 -07:00
Kyle Corbitt
f06e1db3db Merge pull request #71 from OpenPipe/model-providers
Prep for more model providers
2023-07-20 14:55:31 -07:00
Kyle Corbitt
ded6678e97 Prep for more model providers
Adds a `modelProvider` field to `promptVariants`, currently set to "openai/ChatCompletion" for all variants.

Adds a `modelProviders/` directory where we can define and store pluggable model providers. Currently just OpenAI. Not everything is pluggable yet -- notably, the code that actually generates completions hasn't been migrated to this setup.

Does a lot of work to get the types working. Prompts are now defined with a function `definePrompt(modelProvider, config)` instead of `prompt = config`. Added a script to migrate old prompt definitions.

This is still partial work, but the diff is large enough that I want to get it in. I don't think anything is broken but I haven't tested thoroughly.
2023-07-20 14:49:22 -07:00
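
The `definePrompt(modelProvider, config)` shape this commit describes might look roughly like the following minimal TypeScript sketch; the type names and the provider union members are illustrative assumptions, not the repo's actual definitions:

// Hypothetical sketch of the definePrompt() shape described in the commit
// message above. Type names here are illustrative, not the repo's real API.
type ModelProviderId = "openai/ChatCompletion" | "replicate/llama2";

interface PromptConfig {
  model: string;
  messages: { role: "system" | "user" | "assistant"; content: string }[];
  temperature?: number;
}

// Tags a prompt config with the provider that should execute it, so the
// backend can dispatch to the matching pluggable model provider.
function definePrompt(modelProvider: ModelProviderId, config: PromptConfig) {
  return { modelProvider, config };
}

// Usage, mirroring the migrated prompt definitions in the seed diff below:
const prompt = definePrompt("openai/ChatCompletion", {
  model: "gpt-3.5-turbo-0613",
  messages: [{ role: "user", content: "What is the capital of France?" }],
  temperature: 0,
});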
arcticfly
9314a86857 Use translation in initial scenarios (#70) 2023-07-20 14:28:48 -07:00
David Corbitt
54dcb4a567 Prevent text input labels from overlaying scenarios header 2023-07-20 14:28:36 -07:00
David Corbitt
2c8c8d07cf Merge branch 'main' of github.com:corbt/prompt-lab 2023-07-20 13:38:58 -07:00
David Corbitt
e885bdd365 Fix ScenarioEditor padding 2023-07-20 13:38:46 -07:00
arcticfly
86dc36a656 Improve refinement (#69)
* Format construction function on return

* Add more refinement examples

* Treat 503 like 429

* Define prompt as object

* Fix prettier
2023-07-20 13:05:27 -07:00
arcticfly
55c077d604 Create FloatingLabelInput for scenario variables (#68)
* Create FloatingLabelInput

* Fix prettier

* Simplify changes
2023-07-20 12:20:12 -07:00
arcticfly
e598e454d0 Add new predefined refinement options (#67)
* Add new predefined refinement options

* Fix prettier

* Add icon to SelectModelModal title
2023-07-19 20:10:08 -07:00
David Corbitt
6e3f90cd2f Add more info to refinement 2023-07-19 18:10:23 -07:00
David Corbitt
eec894e101 Allow multiline instructions 2023-07-19 18:10:04 -07:00
David Corbitt
f797fc3fa4 Eliminate spinner flicker in OutputCell 2023-07-19 18:09:47 -07:00
David Corbitt
335dc0357f Fix CompareFunctions for mobile 2023-07-19 17:24:19 -07:00
arcticfly
e6e2c706c2 Change up refinement UI (#66)
* Remove unused ScenarioVariantCell fields

* Refine deriveNewConstructFn

* Fix prettier

* Remove migration script

* Add refine modal

* Fix prettier

* Fix diff checker overflow

* Decrease diff height

* Add more context to prompt refining

* Auto-expand prompt when refining
2023-07-19 17:19:45 -07:00
Kyle Corbitt
7d2166b305 Merge pull request #65 from OpenPipe/no-model
Cache cost on ModelOutput
2023-07-19 16:22:35 -07:00
arcticfly
2c4ba6eb9b Update README.md (#64) 2023-07-19 15:39:21 -07:00
75 changed files with 2455 additions and 3554 deletions

.env.example

@@ -17,6 +17,9 @@ DATABASE_URL="postgresql://postgres:postgres@localhost:5432/openpipe?schema=publ
 # https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key
 OPENAI_API_KEY=""
+# Replicate API token. Create a token here: https://replicate.com/account/api-tokens
+REPLICATE_API_TOKEN=""
 NEXT_PUBLIC_SOCKET_URL="http://localhost:3318"
 # Next Auth

Dockerfile

@@ -19,7 +19,6 @@ FROM base as builder
 # Include all NEXT_PUBLIC_* env vars here
 ARG NEXT_PUBLIC_POSTHOG_KEY
-ARG NEXT_PUBLIC_IS_PUBLIC_PLAYGROUND
 ARG NEXT_PUBLIC_SOCKET_URL
 WORKDIR /app

README.md

@@ -8,10 +8,10 @@ OpenPipe is a flexible playground for comparing and optimizing LLM prompts. It l
 These are simple experiments users have created that show how OpenPipe works.
-- [Country Capitals](https://openpipe.ai/experiments/11111111-1111-1111-1111-111111111111)
-- [Reddit User Needs](https://openpipe.ai/experiments/22222222-2222-2222-2222-222222222222)
-- [OpenAI Function Calls](https://openpipe.ai/experiments/2ebbdcb3-ed51-456e-87dc-91f72eaf3e2b)
-- [Activity Classification](https://openpipe.ai/experiments/3950940f-ab6b-4b74-841d-7e9dbc4e4ff8)
+- [Country Capitals](https://app.openpipe.ai/experiments/11111111-1111-1111-1111-111111111111)
+- [Reddit User Needs](https://app.openpipe.ai/experiments/22222222-2222-2222-2222-222222222222)
+- [OpenAI Function Calls](https://app.openpipe.ai/experiments/2ebbdcb3-ed51-456e-87dc-91f72eaf3e2b)
+- [Activity Classification](https://app.openpipe.ai/experiments/3950940f-ab6b-4b74-841d-7e9dbc4e4ff8)
 <img src="https://github.com/openpipe/openpipe/assets/176426/fc7624c6-5b65-4d4d-82b7-4a816f3e5678" alt="demo" height="400px">

package.json

@@ -21,6 +21,7 @@
     "check": "concurrently 'pnpm lint' 'pnpm tsc' 'pnpm prettier . --check'"
   },
   "dependencies": {
+    "@apidevtools/json-schema-ref-parser": "^10.1.0",
     "@babel/preset-typescript": "^7.22.5",
     "@babel/standalone": "^7.22.9",
     "@chakra-ui/next-js": "^2.1.4",
@@ -39,6 +40,7 @@
     "@trpc/next": "^10.26.0",
     "@trpc/react-query": "^10.26.0",
     "@trpc/server": "^10.26.0",
+    "ast-types": "^0.14.2",
     "chroma-js": "^2.4.2",
     "concurrently": "^8.2.0",
     "cors": "^2.8.5",
@@ -51,7 +53,9 @@
     "graphile-worker": "^0.13.0",
     "immer": "^10.0.2",
     "isolated-vm": "^4.5.0",
+    "json-schema-to-typescript": "^13.0.2",
     "json-stringify-pretty-compact": "^4.0.0",
+    "jsonschema": "^1.4.1",
     "lodash-es": "^4.17.21",
     "next": "^13.4.2",
     "next-auth": "^4.22.1",
@@ -68,10 +72,14 @@
     "react-select": "^5.7.4",
     "react-syntax-highlighter": "^15.5.0",
     "react-textarea-autosize": "^8.5.0",
+    "recast": "^0.23.3",
+    "replicate": "^0.12.3",
     "socket.io": "^4.7.1",
     "socket.io-client": "^4.7.1",
     "superjson": "1.12.2",
     "tsx": "^3.12.7",
+    "type-fest": "^4.0.0",
+    "vite-tsconfig-paths": "^4.2.0",
     "zod": "^3.21.4",
     "zustand": "^4.3.9"
   },
@@ -83,6 +91,7 @@
     "@types/cors": "^2.8.13",
     "@types/eslint": "^8.37.0",
     "@types/express": "^4.17.17",
+    "@types/json-schema": "^7.0.12",
     "@types/lodash-es": "^4.17.8",
     "@types/node": "^18.16.0",
     "@types/pluralize": "^0.0.30",

pnpm-lock.yaml (generated; 408 changed lines)

@@ -5,6 +5,9 @@ settings:
   excludeLinksFromLockfile: false
 dependencies:
+  '@apidevtools/json-schema-ref-parser':
+    specifier: ^10.1.0
+    version: 10.1.0
   '@babel/preset-typescript':
     specifier: ^7.22.5
     version: 7.22.5(@babel/core@7.22.9)
@@ -59,6 +62,9 @@ dependencies:
   '@trpc/server':
     specifier: ^10.26.0
     version: 10.26.0
+  ast-types:
+    specifier: ^0.14.2
+    version: 0.14.2
   chroma-js:
     specifier: ^2.4.2
     version: 2.4.2
@@ -95,9 +101,15 @@ dependencies:
   isolated-vm:
     specifier: ^4.5.0
     version: 4.5.0
+  json-schema-to-typescript:
+    specifier: ^13.0.2
+    version: 13.0.2
   json-stringify-pretty-compact:
     specifier: ^4.0.0
     version: 4.0.0
+  jsonschema:
+    specifier: ^1.4.1
+    version: 1.4.1
   lodash-es:
     specifier: ^4.17.21
     version: 4.17.21
@@ -146,6 +158,12 @@ dependencies:
   react-textarea-autosize:
     specifier: ^8.5.0
     version: 8.5.0(@types/react@18.2.6)(react@18.2.0)
+  recast:
+    specifier: ^0.23.3
+    version: 0.23.3
+  replicate:
+    specifier: ^0.12.3
+    version: 0.12.3
   socket.io:
     specifier: ^4.7.1
     version: 4.7.1
@@ -158,6 +176,12 @@ dependencies:
   tsx:
     specifier: ^3.12.7
     version: 3.12.7
+  type-fest:
+    specifier: ^4.0.0
+    version: 4.0.0
+  vite-tsconfig-paths:
+    specifier: ^4.2.0
+    version: 4.2.0(typescript@5.0.4)
   zod:
     specifier: ^3.21.4
     version: 3.21.4
@@ -187,6 +211,9 @@ devDependencies:
   '@types/express':
     specifier: ^4.17.17
     version: 4.17.17
+  '@types/json-schema':
+    specifier: ^7.0.12
+    version: 7.0.12
   '@types/lodash-es':
     specifier: ^4.17.8
     version: 4.17.8
@@ -259,6 +286,17 @@ packages:
       '@jridgewell/gen-mapping': 0.3.3
       '@jridgewell/trace-mapping': 0.3.18
+  /@apidevtools/json-schema-ref-parser@10.1.0:
+    resolution: {integrity: sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==}
+    engines: {node: '>= 16'}
+    dependencies:
+      '@jsdevtools/ono': 7.1.3
+      '@types/json-schema': 7.0.12
+      '@types/lodash.clonedeep': 4.5.7
+      js-yaml: 4.1.0
+      lodash.clonedeep: 4.5.0
+    dev: false
   /@babel/code-frame@7.22.5:
     resolution: {integrity: sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==}
     engines: {node: '>=6.9.0'}
@@ -564,6 +602,16 @@ packages:
       '@babel/helper-validator-identifier': 7.22.5
       to-fast-properties: 2.0.0
+  /@bcherny/json-schema-ref-parser@10.0.5-fork:
+    resolution: {integrity: sha512-E/jKbPoca1tfUPj3iSbitDZTGnq6FUFjkH6L8U2oDwSuwK1WhnnVtCG7oFOTg/DDnyoXbQYUiUiGOibHqaGVnw==}
+    engines: {node: '>= 16'}
+    dependencies:
+      '@jsdevtools/ono': 7.1.3
+      '@types/json-schema': 7.0.12
+      call-me-maybe: 1.0.2
+      js-yaml: 4.1.0
+    dev: false
   /@chakra-ui/accordion@2.2.0(@chakra-ui/system@2.5.8)(framer-motion@10.12.17)(react@18.2.0):
     resolution: {integrity: sha512-2IK1iLzTZ22u8GKPPPn65mqJdZidn4AvkgAbv17ISdKA07VHJ8jSd4QF1T5iCXjKfZ0XaXozmhP4kDhjwF2IbQ==}
     peerDependencies:
@@ -2385,6 +2433,10 @@ packages:
       '@jridgewell/resolve-uri': 3.1.0
       '@jridgewell/sourcemap-codec': 1.4.14
+  /@jsdevtools/ono@7.1.3:
+    resolution: {integrity: sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==}
+    dev: false
   /@monaco-editor/loader@1.3.3(monaco-editor@0.40.0):
     resolution: {integrity: sha512-6KKF4CTzcJiS8BJwtxtfyYt9shBiEv32ateQ9T4UVogwn4HM/uPo9iJd2Dmbkpz8CM6Y0PDUpjnZzCwC+eYo2Q==}
     peerDependencies:
@@ -2806,6 +2858,13 @@ packages:
       '@types/serve-static': 1.15.2
     dev: true
+  /@types/glob@7.2.0:
+    resolution: {integrity: sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==}
+    dependencies:
+      '@types/minimatch': 5.1.2
+      '@types/node': 18.16.0
+    dev: false
   /@types/hast@2.3.5:
     resolution: {integrity: sha512-SvQi0L/lNpThgPoleH53cdjB3y9zpLlVjRbqB3rH8hx1jiRSBGAhyjV3H+URFjNVRqt2EdYNrbZE5IsGlNfpRg==}
     dependencies:
@@ -2818,7 +2877,6 @@ packages:
   /@types/json-schema@7.0.12:
     resolution: {integrity: sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==}
-    dev: true
   /@types/json5@0.0.29:
     resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==}
@@ -2830,6 +2888,12 @@ packages:
       '@types/lodash': 4.14.195
     dev: true
+  /@types/lodash.clonedeep@4.5.7:
+    resolution: {integrity: sha512-ccNqkPptFIXrpVqUECi60/DFxjNKsfoQxSQsgcBJCX/fuX1wgyQieojkcWH/KpE3xzLoWN/2k+ZeGqIN3paSvw==}
+    dependencies:
+      '@types/lodash': 4.14.195
+    dev: false
   /@types/lodash.mergewith@4.6.7:
     resolution: {integrity: sha512-3m+lkO5CLRRYU0fhGRp7zbsGi6+BZj0uTVSwvcKU+nSlhjA9/QRNfuSGnD2mX6hQA7ZbmcCkzk5h4ZYGOtk14A==}
     dependencies:
@@ -2847,6 +2911,10 @@ packages:
     resolution: {integrity: sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==}
     dev: true
+  /@types/minimatch@5.1.2:
+    resolution: {integrity: sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==}
+    dev: false
   /@types/ms@0.7.31:
     resolution: {integrity: sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==}
     dev: false
@@ -2881,6 +2949,10 @@ packages:
     resolution: {integrity: sha512-kVww6xZrW/db5BR9OqiT71J9huRdQ+z/r+LbDuT7/EK50mCmj5FoaIARnVv0rvjUS/YpDox0cDU9lpQT011VBA==}
     dev: true
+  /@types/prettier@2.7.3:
+    resolution: {integrity: sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==}
+    dev: false
   /@types/prismjs@1.26.0:
     resolution: {integrity: sha512-ZTaqn/qSqUuAq1YwvOFQfVW1AR/oQJlLSZVustdjwI+GZ8kr0MSHBj0tsXPW1EqHubx50gtBEjbPGsdZwQwCjQ==}
     dev: true
@@ -3332,6 +3404,10 @@ packages:
     engines: {node: '>=10'}
     dev: true
+  /any-promise@1.3.0:
+    resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==}
+    dev: false
   /anymatch@3.1.3:
     resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
     engines: {node: '>= 8'}
@@ -3342,7 +3418,6 @@ packages:
   /argparse@2.0.1:
     resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
-    dev: true
   /aria-hidden@1.2.3:
     resolution: {integrity: sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==}
@@ -3426,6 +3501,15 @@ packages:
       is-shared-array-buffer: 1.0.2
     dev: true
+  /assert@2.0.0:
+    resolution: {integrity: sha512-se5Cd+js9dXJnu6Ag2JFc00t+HmHOen+8Q+L7O9zI0PqQXr20uk2J0XQqMxZEeo5U50o8Nvmmx7dZrl+Ufr35A==}
+    dependencies:
+      es6-object-assign: 1.1.0
+      is-nan: 1.3.2
+      object-is: 1.1.5
+      util: 0.12.5
+    dev: false
   /assertion-error@1.1.0:
     resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==}
     dev: true
@@ -3434,6 +3518,20 @@ packages:
     resolution: {integrity: sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==}
     dev: true
+  /ast-types@0.14.2:
+    resolution: {integrity: sha512-O0yuUDnZeQDL+ncNGlJ78BiO4jnYI3bvMsD5prT0/nsgijG/LpNBIr63gTjVTNsiGkgQhiyCShTgxt8oXOrklA==}
+    engines: {node: '>=4'}
+    dependencies:
+      tslib: 2.6.0
+    dev: false
+  /ast-types@0.16.1:
+    resolution: {integrity: sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==}
+    engines: {node: '>=4'}
+    dependencies:
+      tslib: 2.6.0
+    dev: false
   /asynckit@0.4.0:
     resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
     dev: false
@@ -3441,7 +3539,6 @@ packages:
   /available-typed-arrays@1.0.5:
     resolution: {integrity: sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==}
     engines: {node: '>= 0.4'}
-    dev: true
   /axe-core@4.7.2:
     resolution: {integrity: sha512-zIURGIS1E1Q4pcrMjp+nnEh+16G56eG/MUllJH8yEvw7asDo7Ac9uhC9KIH5jzpITueEZolfYglnCGIuSBz39g==}
@@ -3492,7 +3589,6 @@ packages:
   /balanced-match@1.0.2:
     resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
-    dev: true
   /base-64@0.1.0:
     resolution: {integrity: sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==}
@@ -3553,7 +3649,6 @@ packages:
     dependencies:
       balanced-match: 1.0.2
       concat-map: 0.0.1
-    dev: true
   /braces@3.0.2:
     resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
@@ -3612,6 +3707,10 @@ packages:
       function-bind: 1.1.1
       get-intrinsic: 1.2.1
+  /call-me-maybe@1.0.2:
+    resolution: {integrity: sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==}
+    dev: false
   /callsites@3.1.0:
     resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
     engines: {node: '>=6'}
@@ -3695,6 +3794,17 @@ packages:
     resolution: {integrity: sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==}
     dev: false
+  /cli-color@2.0.3:
+    resolution: {integrity: sha512-OkoZnxyC4ERN3zLzZaY9Emb7f/MhBOIpePv0Ycok0fJYT+Ouo00UBEIwsVsr0yoow++n5YWlSUgST9GKhNHiRQ==}
+    engines: {node: '>=0.10'}
+    dependencies:
+      d: 1.0.1
+      es5-ext: 0.10.62
+      es6-iterator: 2.0.3
+      memoizee: 0.4.15
+      timers-ext: 0.1.7
+    dev: false
   /client-only@0.0.1:
     resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==}
     dev: false
@@ -3758,7 +3868,6 @@ packages:
   /concat-map@0.0.1:
     resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
-    dev: true
   /concurrently@8.2.0:
     resolution: {integrity: sha512-nnLMxO2LU492mTUj9qX/az/lESonSZu81UznYDoXtz1IQf996ixVqPAgHXwvHiHCAef/7S8HIK+fTFK7Ifk8YA==}
@@ -3887,6 +3996,13 @@ packages:
   /csstype@3.1.2:
     resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==}
+  /d@1.0.1:
+    resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==}
+    dependencies:
+      es5-ext: 0.10.62
+      type: 1.2.0
+    dev: false
   /damerau-levenshtein@1.0.8:
     resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==}
     dev: true
@@ -3985,7 +4101,6 @@ packages:
     dependencies:
       has-property-descriptors: 1.0.0
      object-keys: 1.1.1
-    dev: true
   /delayed-stream@1.0.0:
     resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
@@ -4225,6 +4340,44 @@ packages:
       is-symbol: 1.0.4
     dev: true
+  /es5-ext@0.10.62:
+    resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==}
+    engines: {node: '>=0.10'}
+    requiresBuild: true
+    dependencies:
+      es6-iterator: 2.0.3
+      es6-symbol: 3.1.3
+      next-tick: 1.1.0
+    dev: false
+  /es6-iterator@2.0.3:
+    resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==}
+    dependencies:
+      d: 1.0.1
+      es5-ext: 0.10.62
+      es6-symbol: 3.1.3
+    dev: false
+  /es6-object-assign@1.1.0:
+    resolution: {integrity: sha512-MEl9uirslVwqQU369iHNWZXsI8yaZYGg/D65aOgZkeyFJwHYSxilf7rQzXKI7DdDuBPrBXbfk3sl9hJhmd5AUw==}
+    dev: false
+  /es6-symbol@3.1.3:
+    resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==}
+    dependencies:
+      d: 1.0.1
+      ext: 1.7.0
+    dev: false
+  /es6-weak-map@2.0.3:
+    resolution: {integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==}
+    dependencies:
+      d: 1.0.1
+      es5-ext: 0.10.62
+      es6-iterator: 2.0.3
+      es6-symbol: 3.1.3
+    dev: false
   /esbuild@0.17.19:
     resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==}
     engines: {node: '>=12'}
@@ -4580,6 +4733,12 @@ packages:
       eslint-visitor-keys: 3.4.1
     dev: true
+  /esprima@4.0.1:
+    resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==}
+    engines: {node: '>=4'}
+    hasBin: true
+    dev: false
   /esquery@1.5.0:
     resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==}
     engines: {node: '>=0.10'}
@@ -4614,6 +4773,13 @@ packages:
     engines: {node: '>= 0.6'}
     dev: false
+  /event-emitter@0.3.5:
+    resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==}
+    dependencies:
+      d: 1.0.1
+      es5-ext: 0.10.62
+    dev: false
   /event-target-shim@5.0.1:
     resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==}
     engines: {node: '>=6'}
@@ -4693,6 +4859,12 @@ packages:
       - supports-color
     dev: false
+  /ext@1.7.0:
+    resolution: {integrity: sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==}
+    dependencies:
+      type: 2.7.2
+    dev: false
   /fast-deep-equal@3.1.3:
     resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
     dev: true
@@ -4795,7 +4967,6 @@ packages:
     resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==}
     dependencies:
       is-callable: 1.2.7
-    dev: true
   /form-data-encoder@1.7.2:
     resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==}
@@ -4859,7 +5030,6 @@ packages:
   /fs.realpath@1.0.0:
     resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
-    dev: true
   /fsevents@2.3.2:
     resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
@@ -4911,6 +5081,11 @@ packages:
     engines: {node: '>=6'}
     dev: false
+  /get-stdin@8.0.0:
+    resolution: {integrity: sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==}
+    engines: {node: '>=10'}
+    dev: false
   /get-stream@6.0.1:
     resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==}
     engines: {node: '>=10'}
@@ -4942,6 +5117,16 @@ packages:
       is-glob: 4.0.3
     dev: true
+  /glob-promise@4.2.2(glob@7.2.3):
+    resolution: {integrity: sha512-xcUzJ8NWN5bktoTIX7eOclO1Npxd/dyVqUJxlLIDasT4C7KZyqlPIwkdJ0Ypiy3p2ZKahTjK4M9uC3sNSfNMzw==}
+    engines: {node: '>=12'}
+    peerDependencies:
+      glob: ^7.1.6
+    dependencies:
+      '@types/glob': 7.2.0
+      glob: 7.2.3
+    dev: false
   /glob-to-regexp@0.4.1:
     resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==}
     dev: true
@@ -4966,7 +5151,6 @@ packages:
       minimatch: 3.1.2
       once: 1.4.0
       path-is-absolute: 1.0.1
-    dev: true
   /globals@11.12.0:
     resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==}
@@ -5015,13 +5199,11 @@ packages:
   /globrex@0.1.2:
     resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==}
-    dev: true
   /gopd@1.0.1:
     resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==}
     dependencies:
       get-intrinsic: 1.2.1
-    dev: true
   /gpt-tokens@1.0.10:
     resolution: {integrity: sha512-DNWfqhu+ZAbjTUT76Xc5UBE+e7L0WejsrbiJy+/zgvA2C4697OFN6TLfQY7zaWlay8bNUKqLzbStz0VI0thDtQ==}
@@ -5072,7 +5254,6 @@ packages:
     resolution: {integrity: sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==}
     dependencies:
       get-intrinsic: 1.2.1
-    dev: true
   /has-proto@1.0.1:
     resolution: {integrity: sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==}
@@ -5087,7 +5268,6 @@ packages:
     engines: {node: '>= 0.4'}
     dependencies:
       has-symbols: 1.0.3
-    dev: true
   /has@1.0.3:
     resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==}
@@ -5190,7 +5370,6 @@ packages:
     dependencies:
       once: 1.4.0
       wrappy: 1.0.2
-    dev: true
   /inherits@2.0.4:
     resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
@@ -5226,6 +5405,14 @@ packages:
       is-decimal: 1.0.4
     dev: false
+  /is-arguments@1.1.1:
+    resolution: {integrity: sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==}
+    engines: {node: '>= 0.4'}
+    dependencies:
+      call-bind: 1.0.2
+      has-tostringtag: 1.0.0
+    dev: false
   /is-array-buffer@3.0.2:
     resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==}
     dependencies:
@@ -5266,7 +5453,6 @@ packages:
   /is-callable@1.2.7:
     resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==}
     engines: {node: '>= 0.4'}
-    dev: true
   /is-core-module@2.12.1:
     resolution: {integrity: sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==}
@@ -5305,6 +5491,13 @@ packages:
     engines: {node: '>=8'}
     dev: false
+  /is-generator-function@1.0.10:
+    resolution: {integrity: sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==}
+    engines: {node: '>= 0.4'}
+    dependencies:
+      has-tostringtag: 1.0.0
+    dev: false
   /is-glob@4.0.3:
     resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
     engines: {node: '>=0.10.0'}
@@ -5323,6 +5516,14 @@ packages:
       is-docker: 3.0.0
     dev: true
+  /is-nan@1.3.2:
+    resolution: {integrity: sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==}
+    engines: {node: '>= 0.4'}
+    dependencies:
+      call-bind: 1.0.2
+      define-properties: 1.2.0
+    dev: false
   /is-negative-zero@2.0.2:
     resolution: {integrity: sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==}
     engines: {node: '>= 0.4'}
@@ -5344,6 +5545,10 @@ packages:
     engines: {node: '>=8'}
     dev: true
+  /is-promise@2.2.2:
+    resolution: {integrity: sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==}
+    dev: false
   /is-regex@1.1.4:
     resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==}
     engines: {node: '>= 0.4'}
@@ -5387,7 +5592,6 @@ packages:
     engines: {node: '>= 0.4'}
     dependencies:
       which-typed-array: 1.1.11
-    dev: true
   /is-weakref@1.0.2:
     resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==}
@@ -5460,7 +5664,6 @@ packages:
     hasBin: true
     dependencies:
       argparse: 2.0.1
-    dev: true
   /jsesc@2.5.2:
     resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==}
@@ -5470,6 +5673,27 @@ packages:
   /json-parse-even-better-errors@2.3.1:
     resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
+  /json-schema-to-typescript@13.0.2:
+    resolution: {integrity: sha512-TCaEVW4aI2FmMQe7f98mvr3/oiVmXEC1xZjkTZ9L/BSoTXFlC7p64mD5AD2d8XWycNBQZUnHwXL5iVXt1HWwNQ==}
+    engines: {node: '>=12.0.0'}
+    hasBin: true
+    dependencies:
+      '@bcherny/json-schema-ref-parser': 10.0.5-fork
+      '@types/json-schema': 7.0.12
+      '@types/lodash': 4.14.195
+      '@types/prettier': 2.7.3
+      cli-color: 2.0.3
+      get-stdin: 8.0.0
+      glob: 7.2.3
+      glob-promise: 4.2.2(glob@7.2.3)
+      is-glob: 4.0.3
+      lodash: 4.17.21
+      minimist: 1.2.8
+      mkdirp: 1.0.4
+      mz: 2.7.0
+      prettier: 2.8.8
+    dev: false
   /json-schema-traverse@0.4.1:
     resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
     dev: true
@@ -5498,6 +5722,10 @@ packages:
     resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==}
     dev: true
+  /jsonschema@1.4.1:
+    resolution: {integrity: sha512-S6cATIPVv1z0IlxdN+zUk5EPjkGCdnhN4wVSBlvoUO1tOLJootbo9CquNJmbIh4yikWHiUedhRYrNPn1arpEmQ==}
+    dev: false
   /jsx-ast-utils@3.3.4:
     resolution: {integrity: sha512-fX2TVdCViod6HwKEtSWGHs57oFhVfCMwieb9PuRDgjDPh5XeqJiHFFFJCHxU5cnTc3Bu/GRL+kPiFmw8XWOfKw==}
     engines: {node: '>=4.0'}
@@ -5560,6 +5788,10 @@ packages:
     resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==}
     dev: false
+  /lodash.clonedeep@4.5.0:
+    resolution: {integrity: sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==}
+    dev: false
   /lodash.merge@4.6.2:
     resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
     dev: true
@@ -5602,6 +5834,12 @@ packages:
     dependencies:
       yallist: 4.0.0
+  /lru-queue@0.1.0:
+    resolution: {integrity: sha512-BpdYkt9EvGl8OfWHDQPISVpcl5xZthb+XPsbELj5AQXxIC8IriDZIQYjBJPEm5rS420sjZ0TLEzRcq5KdBhYrQ==}
+    dependencies:
+      es5-ext: 0.10.62
+    dev: false
   /magic-string@0.30.1:
     resolution: {integrity: sha512-mbVKXPmS0z0G4XqFDCTllmDQ6coZzn94aMlb0o/A4HEHJCKcanlDZwYJgwnkmgD3jyWhUgj9VsPrfd972yPffA==}
     engines: {node: '>=12'}
@@ -5630,6 +5868,19 @@ packages:
     resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==}
     dev: false
+  /memoizee@0.4.15:
+    resolution: {integrity: sha512-UBWmJpLZd5STPm7PMUlOw/TSy972M+z8gcyQ5veOnSDRREz/0bmpyTfKt3/51DhEBqCZQn1udM/5flcSPYhkdQ==}
+    dependencies:
+      d: 1.0.1
+      es5-ext: 0.10.62
+      es6-weak-map: 2.0.3
+      event-emitter: 0.3.5
+      is-promise: 2.2.2
+      lru-queue: 0.1.0
+      next-tick: 1.1.0
+      timers-ext: 0.1.7
+    dev: false
   /merge-descriptors@1.0.1:
     resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==}
     dev: false
@@ -5692,11 +5943,16 @@ packages:
     resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
     dependencies:
       brace-expansion: 1.1.11
-    dev: true
   /minimist@1.2.8:
     resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
+  /mkdirp@1.0.4:
+    resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==}
+    engines: {node: '>=10'}
+    hasBin: true
+    dev: false
   /mlly@1.4.0:
     resolution: {integrity: sha512-ua8PAThnTwpprIaU47EPeZ/bPUVp2QYBbWMphUQpVdBI3Lgqzm5KZQ45Agm3YJedHXaIHl6pBGabaLSUPPSptg==}
     dependencies:
@@ -5726,6 +5982,14 @@ packages:
       object-assign: 4.1.1
     dev: false
+  /mz@2.7.0:
+    resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
+    dependencies:
+      any-promise: 1.3.0
+      object-assign: 4.1.1
+      thenify-all: 1.6.0
+    dev: false
   /nanoid@3.3.6:
     resolution: {integrity: sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==}
     engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
@@ -5773,6 +6037,10 @@ packages:
       uuid: 8.3.2
     dev: false
+  /next-tick@1.1.0:
+    resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==}
+    dev: false
   /next@13.4.2(@babel/core@7.22.9)(react-dom@18.2.0)(react@18.2.0):
     resolution: {integrity: sha512-aNFqLs3a3nTGvLWlO9SUhCuMUHVPSFQC0+tDNGAsDXqx+WJDFSbvc233gOJ5H19SBc7nw36A9LwQepOJ2u/8Kg==}
     engines: {node: '>=16.8.0'}
@@ -5883,6 +6151,14 @@ packages:
   /object-inspect@1.12.3:
     resolution: {integrity: sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==}
+  /object-is@1.1.5:
+    resolution: {integrity: sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==}
+    engines: {node: '>= 0.4'}
+    dependencies:
+      call-bind: 1.0.2
+      define-properties: 1.2.0
+    dev: false
   /object-keys@0.4.0:
     resolution: {integrity: sha512-ncrLw+X55z7bkl5PnUvHwFK9FcGuFYo9gtjws2XtSzL+aZ8tm830P60WJ0dSmFVaSalWieW5MD7kEdnXda9yJw==}
     dev: false
@@ -5890,7 +6166,6 @@ packages:
   /object-keys@1.1.1:
     resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==}
     engines: {node: '>= 0.4'}
-    dev: true
   /object.assign@4.1.4:
     resolution: {integrity: sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==}
@@ -5956,7 +6231,6 @@ packages:
     resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
     dependencies:
       wrappy: 1.0.2
-    dev: true
   /onetime@5.1.2:
     resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
@@ -6111,7 +6385,6 @@ packages:
   /path-is-absolute@1.0.1:
     resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
     engines: {node: '>=0.10.0'}
-    dev: true
   /path-key@3.1.1:
     resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
@@ -6338,7 +6611,6 @@ packages:
     resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==}
     engines: {node: '>=10.13.0'}
     hasBin: true
-    dev: true
   /prettier@3.0.0:
     resolution: {integrity: sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==}
@@ -6688,6 +6960,17 @@ packages:
       picomatch: 2.3.1
     dev: false
+  /recast@0.23.3:
+    resolution: {integrity: sha512-HbCVFh2ANP6a09nzD4lx7XthsxMOJWKX5pIcUwtLrmeEIl3I0DwjCoVXDE0Aobk+7k/mS3H50FK4iuYArpcT6Q==}
+    engines: {node: '>= 4'}
+    dependencies:
+      assert: 2.0.0
+      ast-types: 0.16.1
+      esprima: 4.0.1
+      source-map: 0.6.1
+      tslib: 2.6.0
+    dev: false
   /refractor@3.6.0:
     resolution: {integrity: sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==}
     dependencies:
@@ -6708,6 +6991,11 @@ packages:
       functions-have-names: 1.2.3
     dev: true
+  /replicate@0.12.3:
+    resolution: {integrity: sha512-HVWKPoVhWVTONlWk+lUXmq9Vy2J8MxBJMtDBQq3dA5uq71ZzKTh0xvJfvzW4+VLBjhBeL7tkdua6hZJmKfzAPQ==}
+    engines: {git: '>=2.11.0', node: '>=16.6.0', npm: '>=7.19.0', yarn: '>=1.7.0'}
+    dev: false
   /require-directory@2.1.1:
     resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==}
     engines: {node: '>=0.10.0'}
@@ -7216,6 +7504,19 @@ packages:
     resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
     dev: true
+  /thenify-all@1.6.0:
+    resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==}
+    engines: {node: '>=0.8'}
+    dependencies:
+      thenify: 3.3.1
+    dev: false
+  /thenify@3.3.1:
+    resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
+    dependencies:
+      any-promise: 1.3.0
+    dev: false
   /through2@0.4.2:
     resolution: {integrity: sha512-45Llu+EwHKtAZYTPPVn3XZHBgakWMN3rokhEv5hu596XP+cNgplMg+Gj+1nmAvj+L0K7+N49zBKx5rah5u0QIQ==}
     dependencies:
@@ -7227,6 +7528,13 @@ packages:
     resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
     dev: false
+  /timers-ext@0.1.7:
+    resolution: {integrity: sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==}
+    dependencies:
+      es5-ext: 0.10.62
+      next-tick: 1.1.0
+    dev: false
   /tiny-glob@0.2.9:
     resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==}
     dependencies:
@@ -7285,6 +7593,19 @@ packages:
     hasBin: true
     dev: false
+  /tsconfck@2.1.2(typescript@5.0.4):
+    resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==}
+    engines: {node: ^14.13.1 || ^16 || >=18}
+    hasBin: true
+    peerDependencies:
+      typescript: ^4.3.5 || ^5.0.0
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      typescript: 5.0.4
+    dev: false
   /tsconfig-paths@3.14.2:
     resolution: {integrity: sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==}
     dependencies:
@@ -7343,6 +7664,11 @@ packages:
     engines: {node: '>=10'}
     dev: true
+  /type-fest@4.0.0:
+    resolution: {integrity: sha512-d/oYtUnPM9zar2fqqGLYPzgcY0qUlYK0evgNVti93xpzfjGkMgZHu9Lvgrkn0rqGXTgsFRxFamzjGoD9Uo+dgw==}
+    engines: {node: '>=16'}
+    dev: false
   /type-is@1.6.18:
     resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==}
     engines: {node: '>= 0.6'}
@@ -7351,6 +7677,14 @@ packages:
       mime-types: 2.1.35
     dev: false
+  /type@1.2.0:
+    resolution: {integrity: sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==}
+    dev: false
+  /type@2.7.2:
+    resolution: {integrity: sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==}
+    dev: false
   /typed-array-buffer@1.0.0:
     resolution: {integrity: sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==}
     engines: {node: '>= 0.4'}
@@ -7518,6 +7852,16 @@ packages:
     resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
     dev: false
+  /util@0.12.5:
+    resolution: {integrity: sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==}
+    dependencies:
+      inherits: 2.0.4
+      is-arguments: 1.1.1
+      is-generator-function: 1.0.10
+      is-typed-array: 1.1.12
+      which-typed-array: 1.1.11
+    dev: false
   /utils-merge@1.0.1:
     resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==}
     engines: {node: '>= 0.4.0'}
@@ -7555,6 +7899,22 @@ packages:
       - terser
     dev: true
+  /vite-tsconfig-paths@4.2.0(typescript@5.0.4):
+    resolution: {integrity: sha512-jGpus0eUy5qbbMVGiTxCL1iB9ZGN6Bd37VGLJU39kTDD6ZfULTTb1bcc5IeTWqWJKiWV5YihCaibeASPiGi8kw==}
+    peerDependencies:
+      vite: '*'
+    peerDependenciesMeta:
+      vite:
+        optional: true
+    dependencies:
+      debug: 4.3.4
+      globrex: 0.1.2
+      tsconfck: 2.1.2(typescript@5.0.4)
+    transitivePeerDependencies:
+      - supports-color
+      - typescript
+    dev: false
   /vite@4.4.4(@types/node@18.16.0):
     resolution: {integrity: sha512-4mvsTxjkveWrKDJI70QmelfVqTm+ihFAb6+xf4sjEU2TmUCTlVX87tmg/QooPEMQb/lM9qGHT99ebqPziEd3wg==}
     engines: {node: ^14.18.0 || >=16.0.0}
@@ -7744,7 +8104,6 @@ packages:
       for-each: 0.3.3
       gopd: 1.0.1
       has-tostringtag: 1.0.0
-    dev: true
   /which@2.0.2:
     resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
@@ -7774,7 +8133,6 @@ packages:
   /wrappy@1.0.2:
     resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
-    dev: true
   /ws@8.11.0:
     resolution: {integrity: sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==}

migration.sql (new file)

@@ -0,0 +1,17 @@
-- Add new columns allowing NULL values
ALTER TABLE "PromptVariant"
ADD COLUMN "constructFnVersion" INTEGER,
ADD COLUMN "modelProvider" TEXT;
-- Update existing records to have the default values
UPDATE "PromptVariant"
SET "constructFnVersion" = 1,
"modelProvider" = 'openai/ChatCompletion'
WHERE "constructFnVersion" IS NULL OR "modelProvider" IS NULL;
-- Alter table to set NOT NULL constraint
ALTER TABLE "PromptVariant"
ALTER COLUMN "constructFnVersion" SET NOT NULL,
ALTER COLUMN "modelProvider" SET NOT NULL;
ALTER TABLE "ScenarioVariantCell" ADD COLUMN "prompt" JSONB;

prisma/schema.prisma

@@ -31,9 +31,11 @@ model Experiment {
 model PromptVariant {
   id                 String  @id @default(uuid()) @db.Uuid
   label              String
   constructFn        String
-  model              String
+  constructFnVersion Int
+  model              String
+  modelProvider      String
   uiId               String  @default(uuid()) @db.Uuid
   visible            Boolean @default(true)
@@ -98,6 +100,7 @@
   promptVariantId String        @db.Uuid
   promptVariant   PromptVariant @relation(fields: [promptVariantId], references: [id], onDelete: Cascade)
+  prompt          Json?
   testScenarioId  String        @db.Uuid
   testScenario    TestScenario  @relation(fields: [testScenarioId], references: [id], onDelete: Cascade)
View File

@@ -46,8 +46,10 @@ await prisma.promptVariant.createMany({
     label: "Prompt Variant 1",
     sortIndex: 0,
     model: "gpt-3.5-turbo-0613",
+    modelProvider: "openai/ChatCompletion",
+    constructFnVersion: 1,
     constructFn: dedent`
-      prompt = {
+      definePrompt("openai/ChatCompletion", {
        model: "gpt-3.5-turbo-0613",
        messages: [
          {
@@ -56,15 +58,17 @@ await prisma.promptVariant.createMany({
          }
        ],
        temperature: 0,
-      }`,
+      })`,
   },
   {
     experimentId: defaultId,
     label: "Prompt Variant 2",
     sortIndex: 1,
     model: "gpt-3.5-turbo-0613",
+    modelProvider: "openai/ChatCompletion",
+    constructFnVersion: 1,
     constructFn: dedent`
-      prompt = {
+      definePrompt("openai/ChatCompletion", {
        model: "gpt-3.5-turbo-0613",
        messages: [
          {
@@ -73,7 +77,7 @@ await prisma.promptVariant.createMany({
          }
        ],
        temperature: 0,
-      }`,
+      })`,
   },
 ],
 });
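
Note: for context, a complete construct function under the new definePrompt scheme looks roughly like the sketch below. It is assembled from the definePrompt examples that appear later in this diff (in refineOptions); the message content elided by the hunks above is not reproduced.

    definePrompt("openai/ChatCompletion", {
      model: "gpt-3.5-turbo-0613",
      messages: [
        {
          role: "user",
          content: `This is the user's message: ${scenario.user_message}. Return "positive" or "negative" or "neutral"`,
        },
      ],
      temperature: 0,
    })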

View File

@@ -1,48 +0,0 @@
/* eslint-disable @typescript-eslint/no-var-requires */
import YAML from "yaml";
import fs from "fs";
import path from "path";
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
import assert from "assert";
import { type AcceptibleInputSchema } from "@openapi-contrib/openapi-schema-to-json-schema/dist/mjs/openapi-schema-types";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
const convertOpenApiToJsonSchema = async (url: string) => {
// Fetch the openapi document
const response = await fetch(url);
const openApiYaml = await response.text();
// Parse the yaml document
const openApiDocument = YAML.parse(openApiYaml) as AcceptibleInputSchema;
// Convert the openapi schema to json schema
const jsonSchema = openapiSchemaToJsonSchema(openApiDocument);
const modelProperty = jsonSchema.components.schemas.CreateChatCompletionRequest.properties.model;
assert(modelProperty.oneOf.length === 2, "Expected model to have oneOf length of 2");
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
modelProperty.oneOf = undefined;
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "openai.schema.json"),
JSON.stringify(jsonSchema, null, 2),
);
};
convertOpenApiToJsonSchema(OPENAPI_URL)
.then(() => console.log("JSON schema has been written successfully."))
.catch((err) => console.error(err));

View File

@@ -1,52 +0,0 @@
import fs from "fs";
import path from "path";
import openapiTS, { type OpenAPI3 } from "openapi-typescript";
import YAML from "yaml";
import { pick } from "lodash-es";
import assert from "assert";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
// Generate TypeScript types from OpenAPI
const schema = await fetch(OPENAPI_URL)
.then((res) => res.text())
.then((txt) => YAML.parse(txt) as OpenAPI3);
console.log(schema.components?.schemas?.CreateChatCompletionRequest);
// @ts-expect-error just assume this works, the assert will catch it if it doesn't
const modelProperty = schema.components?.schemas?.CreateChatCompletionRequest?.properties?.model;
assert(modelProperty.oneOf.length === 2, "Expected model to have oneOf length of 2");
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
modelProperty.oneOf = undefined;
delete schema["paths"];
assert(schema.components?.schemas);
schema.components.schemas = pick(schema.components?.schemas, [
"CreateChatCompletionRequest",
"ChatCompletionRequestMessage",
"ChatCompletionFunctions",
"ChatCompletionFunctionParameters",
]);
console.log(schema);
let openApiTypes = await openapiTS(schema);
// Remove the `export` from any line that starts with `export`
openApiTypes = openApiTypes.replaceAll("\nexport ", "\n");
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the TypeScript types. We only want to use this in our in-app editor, so
// save as a .txt so VS Code doesn't try to auto-import definitions from it.
fs.writeFileSync(path.join(currentDirectory, "openai.types.ts.txt"), openApiTypes);

File diff suppressed because it is too large

View File

@@ -1,148 +0,0 @@
/**
* This file was auto-generated by openapi-typescript.
* Do not make direct changes to the file.
*/
/** OneOf type helpers */
type Without<T, U> = { [P in Exclude<keyof T, keyof U>]?: never };
type XOR<T, U> = (T | U) extends object ? (Without<T, U> & U) | (Without<U, T> & T) : T | U;
type OneOf<T extends any[]> = T extends [infer Only] ? Only : T extends [infer A, infer B, ...infer Rest] ? OneOf<[XOR<A, B>, ...Rest]> : never;
type paths = Record<string, never>;
type webhooks = Record<string, never>;
interface components {
schemas: {
CreateChatCompletionRequest: {
/**
* @description ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
* @example gpt-3.5-turbo
* @enum {string}
*/
model: "gpt-4" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k-0613";
/** @description A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). */
messages: (components["schemas"]["ChatCompletionRequestMessage"])[];
/** @description A list of functions the model may generate JSON inputs for. */
functions?: (components["schemas"]["ChatCompletionFunctions"])[];
/** @description Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. */
function_call?: OneOf<["none" | "auto", {
/** @description The name of the function to call. */
name: string;
}]>;
/**
* @description What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
*
* We generally recommend altering this or `top_p` but not both.
*
* @default 1
* @example 1
*/
temperature?: number | null;
/**
* @description An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
*
* We generally recommend altering this or `temperature` but not both.
*
* @default 1
* @example 1
*/
top_p?: number | null;
/**
* @description How many chat completion choices to generate for each input message.
* @default 1
* @example 1
*/
n?: number | null;
/**
* @description If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
*
* @default false
*/
stream?: boolean | null;
/**
* @description Up to 4 sequences where the API will stop generating further tokens.
*
* @default null
*/
stop?: (string | null) | (string)[];
/**
* @description The maximum number of [tokens](/tokenizer) to generate in the chat completion.
*
* The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
*
* @default inf
*/
max_tokens?: number;
/**
* @description Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
*
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*
* @default 0
*/
presence_penalty?: number | null;
/**
* @description Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
*
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*
* @default 0
*/
frequency_penalty?: number | null;
/**
* @description Modify the likelihood of specified tokens appearing in the completion.
*
* Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
*
* @default null
*/
logit_bias?: Record<string, unknown> | null;
/**
* @description A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
*
* @example user-1234
*/
user?: string;
};
ChatCompletionRequestMessage: {
/**
* @description The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
* @enum {string}
*/
role: "system" | "user" | "assistant" | "function";
/** @description The contents of the message. `content` is required for all messages except assistant messages with function calls. */
content?: string;
/** @description The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. */
name?: string;
/** @description The name and arguments of a function that should be called, as generated by the model. */
function_call?: {
/** @description The name of the function to call. */
name?: string;
/** @description The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. */
arguments?: string;
};
};
ChatCompletionFunctions: {
/** @description The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */
name: string;
/** @description The description of what the function does. */
description?: string;
parameters?: components["schemas"]["ChatCompletionFunctionParameters"];
};
/** @description The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. */
ChatCompletionFunctionParameters: {
[key: string]: unknown;
};
};
responses: never;
parameters: never;
requestBodies: never;
headers: never;
pathItems: never;
}
type external = Record<string, never>;
type operations = Record<string, never>;

View File

@@ -1,6 +0,0 @@
{
"compilerOptions": {
"target": "esnext",
"moduleResolution": "nodenext"
}
}

View File

@@ -1,19 +1,22 @@
 import { Textarea, type TextareaProps } from "@chakra-ui/react";
 import ResizeTextarea from "react-textarea-autosize";
-import React from "react";
+import React, { useLayoutEffect, useState } from "react";
 export const AutoResizeTextarea: React.ForwardRefRenderFunction<
   HTMLTextAreaElement,
   TextareaProps & { minRows?: number }
-> = (props, ref) => {
+> = ({ minRows = 1, overflowY = "hidden", ...props }, ref) => {
+  const [isRerendered, setIsRerendered] = useState(false);
+  useLayoutEffect(() => setIsRerendered(true), []);
   return (
     <Textarea
       minH="unset"
-      overflow="hidden"
+      minRows={minRows}
+      overflowY={isRerendered ? overflowY : "hidden"}
       w="100%"
       resize="none"
       ref={ref}
-      minRows={1}
       transition="height none"
       as={ResizeTextarea}
       {...props}
View File

@@ -0,0 +1,47 @@
import { FormLabel, FormControl, type TextareaProps } from "@chakra-ui/react";
import { useState } from "react";
import AutoResizeTextArea from "../AutoResizeTextArea";
export const FloatingLabelInput = ({
label,
value,
...props
}: { label: string; value: string } & TextareaProps) => {
const [isFocused, setIsFocused] = useState(false);
return (
<FormControl position="relative">
<FormLabel
position="absolute"
left="10px"
top={isFocused || !!value ? 0 : 3}
transform={isFocused || !!value ? "translateY(-50%)" : "translateY(0)"}
fontSize={isFocused || !!value ? "12px" : "16px"}
transition="all 0.15s"
zIndex="5"
bg="white"
px={1}
lineHeight="1"
pointerEvents="none"
color={isFocused ? "blue.500" : "gray.500"}
>
{label}
</FormLabel>
<AutoResizeTextArea
px={3}
pt={3}
pb={2}
onFocus={() => setIsFocused(true)}
onBlur={() => setIsFocused(false)}
borderRadius="md"
borderColor={isFocused ? "blue.500" : "gray.400"}
autoComplete="off"
value={value}
maxHeight={32}
overflowY="auto"
overflowX="hidden"
{...props}
/>
</FormControl>
);
};

View File

@@ -49,7 +49,11 @@ export default function NewScenarioButton() {
       Add Scenario
     </StyledButton>
     <StyledButton onClick={onAutogenerate}>
-      <Icon as={autogenerating ? Spinner : BsPlus} boxSize={6} mr={autogenerating ? 1 : 0} />
+      <Icon
+        as={autogenerating ? Spinner : BsPlus}
+        boxSize={autogenerating ? 4 : 6}
+        mr={autogenerating ? 2 : 0}
+      />
       Autogenerate Scenario
     </StyledButton>
   </HStack>

View File

@@ -6,11 +6,11 @@ import SyntaxHighlighter from "react-syntax-highlighter";
 import { docco } from "react-syntax-highlighter/dist/cjs/styles/hljs";
 import stringify from "json-stringify-pretty-compact";
 import { type ReactElement, useState, useEffect } from "react";
-import { type ChatCompletion } from "openai/resources/chat";
 import useSocket from "~/utils/useSocket";
 import { OutputStats } from "./OutputStats";
 import { ErrorHandler } from "./ErrorHandler";
 import { CellOptions } from "./CellOptions";
+import frontendModelProviders from "~/modelProviders/frontendModelProviders";
 export default function OutputCell({
   scenario,
@@ -33,18 +33,19 @@ export default function OutputCell({
   if (!templateHasVariables) disabledReason = "Add a value to the scenario variables to see output";
-  // if (variant.config === null || Object.keys(variant.config).length === 0)
-  //   disabledReason = "Save your prompt variant to see output";
   const [refetchInterval, setRefetchInterval] = useState(0);
   const { data: cell, isLoading: queryLoading } = api.scenarioVariantCells.get.useQuery(
     { scenarioId: scenario.id, variantId: variant.id },
     { refetchInterval },
   );
-  const { mutateAsync: hardRefetchMutate, isLoading: refetchingOutput } =
-    api.scenarioVariantCells.forceRefetch.useMutation();
-  const [hardRefetch] = useHandledAsyncCallback(async () => {
+  const provider =
+    frontendModelProviders[variant.modelProvider as keyof typeof frontendModelProviders];
+  type OutputSchema = Parameters<typeof provider.normalizeOutput>[0];
+  const { mutateAsync: hardRefetchMutate } = api.scenarioVariantCells.forceRefetch.useMutation();
+  const [hardRefetch, hardRefetching] = useHandledAsyncCallback(async () => {
     await hardRefetchMutate({ scenarioId: scenario.id, variantId: variant.id });
     await utils.scenarioVariantCells.get.invalidate({
       scenarioId: scenario.id,
@@ -55,20 +56,19 @@
   });
   }, [hardRefetchMutate, scenario.id, variant.id]);
-  const fetchingOutput = queryLoading || refetchingOutput;
+  const fetchingOutput = queryLoading || hardRefetching;
   const awaitingOutput =
     !cell ||
     cell.retrievalStatus === "PENDING" ||
     cell.retrievalStatus === "IN_PROGRESS" ||
-    refetchingOutput;
+    hardRefetching;
   useEffect(() => setRefetchInterval(awaitingOutput ? 1000 : 0), [awaitingOutput]);
   const modelOutput = cell?.modelOutput;
   // Disconnect from socket if we're not streaming anymore
-  const streamedMessage = useSocket(cell?.streamingChannel);
-  const streamedContent = streamedMessage?.choices?.[0]?.message?.content;
+  const streamedMessage = useSocket<OutputSchema>(cell?.streamingChannel);
   if (!vars) return null;
@@ -87,19 +87,13 @@
     return <ErrorHandler cell={cell} refetchOutput={hardRefetch} />;
   }
-  const response = modelOutput?.output as unknown as ChatCompletion;
-  const message = response?.choices?.[0]?.message;
-  if (modelOutput && message?.function_call) {
-    const rawArgs = message.function_call.arguments ?? "null";
-    let parsedArgs: string;
-    try {
-      parsedArgs = JSON.parse(rawArgs);
-      // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    } catch (e: any) {
-      parsedArgs = `Failed to parse arguments as JSON: '${rawArgs}' ERROR: ${e.message as string}`;
-    }
+  const normalizedOutput = modelOutput
+    ? provider.normalizeOutput(modelOutput.output)
+    : streamedMessage
+    ? provider.normalizeOutput(streamedMessage)
+    : null;
+  if (modelOutput && normalizedOutput?.type === "json") {
     return (
       <VStack
         w="100%"
@@ -110,7 +104,7 @@
         justifyContent="space-between"
       >
         <VStack w="full" flex={1} spacing={0}>
-          <CellOptions refetchingOutput={refetchingOutput} refetchOutput={hardRefetch} />
+          <CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
           <SyntaxHighlighter
             customStyle={{ overflowX: "unset", width: "100%", flex: 1 }}
             language="json"
@@ -120,13 +114,7 @@
             }}
             wrapLines
           >
-            {stringify(
-              {
-                function: message.function_call.name,
-                args: parsedArgs,
-              },
-              { maxLength: 40 },
-            )}
+            {stringify(normalizedOutput.value, { maxLength: 40 })}
           </SyntaxHighlighter>
         </VStack>
         <OutputStats modelOutput={modelOutput} scenario={scenario} />
@@ -134,13 +122,12 @@
   );
   }
-  const contentToDisplay =
-    message?.content ?? streamedContent ?? JSON.stringify(modelOutput?.output);
+  const contentToDisplay = (normalizedOutput?.type === "text" && normalizedOutput.value) || "";
   return (
     <VStack w="100%" h="100%" justifyContent="space-between" whiteSpace="pre-wrap">
       <VStack w="full" alignItems="flex-start" spacing={0}>
-        <CellOptions refetchingOutput={refetchingOutput} refetchOutput={hardRefetch} />
+        <CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
         <Text>{contentToDisplay}</Text>
       </VStack>
       {modelOutput && <OutputStats modelOutput={modelOutput} scenario={scenario} />}
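
Note: this diff never shows the return type of normalizeOutput. Inferred from the call sites above (normalizedOutput?.type === "json" / "text" and normalizedOutput.value), it is approximately the union below; this is an assumption, not the actual type from modelProviders/types.

    type NormalizedOutput =
      | { type: "text"; value: string }
      | { type: "json"; value: unknown };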

View File

@@ -9,7 +9,7 @@ import { Box, Button, Flex, HStack, Icon, Spinner, Stack, Tooltip, VStack } from
 import { cellPadding } from "../constants";
 import { BsX } from "react-icons/bs";
 import { RiDraggable } from "react-icons/ri";
-import AutoResizeTextArea from "../AutoResizeTextArea";
+import { FloatingLabelInput } from "./FloatingLabelInput";
 export default function ScenarioEditor({
   scenario,
@@ -74,9 +74,9 @@ export default function ScenarioEditor({
   return (
     <HStack
       alignItems="flex-start"
-      pr={cellPadding.x}
+      px={cellPadding.x}
       py={cellPadding.y}
-      pl={canModify ? 0 : cellPadding.x}
+      spacing={0}
       height="100%"
       draggable={!variableInputHovered}
       onDragStart={(e) => {
@@ -96,42 +96,43 @@ export default function ScenarioEditor({
       onDrop={onReorder}
       backgroundColor={isDragTarget ? "gray.100" : "transparent"}
     >
-      {canModify && (
-        <Stack alignSelf="flex-start" opacity={props.hovered ? 1 : 0} spacing={0}>
-          {props.canHide && (
-            <>
-              <Tooltip label="Hide scenario" hasArrow>
-                {/* for some reason the tooltip can't position itself properly relative to the icon without the wrapping box */}
-                <Button
-                  variant="unstyled"
-                  color="gray.400"
-                  height="unset"
-                  width="unset"
-                  minW="unset"
-                  onClick={onHide}
-                  _hover={{
-                    color: "gray.800",
-                    cursor: "pointer",
-                  }}
-                >
-                  <Icon as={hidingInProgress ? Spinner : BsX} boxSize={6} />
-                </Button>
-              </Tooltip>
-              <Icon
-                as={RiDraggable}
-                boxSize={6}
-                color="gray.400"
-                _hover={{ color: "gray.800", cursor: "pointer" }}
-              />
-            </>
-          )}
+      {canModify && props.canHide && (
+        <Stack
+          alignSelf="flex-start"
+          opacity={props.hovered ? 1 : 0}
+          spacing={0}
+          ml={-cellPadding.x}
+        >
+          <Tooltip label="Hide scenario" hasArrow>
+            {/* for some reason the tooltip can't position itself properly relative to the icon without the wrapping box */}
+            <Button
+              variant="unstyled"
+              color="gray.400"
+              height="unset"
+              width="unset"
+              minW="unset"
+              onClick={onHide}
+              _hover={{
+                color: "gray.800",
+                cursor: "pointer",
+              }}
+            >
+              <Icon as={hidingInProgress ? Spinner : BsX} boxSize={hidingInProgress ? 4 : 6} />
+            </Button>
+          </Tooltip>
+          <Icon
+            as={RiDraggable}
+            boxSize={6}
+            color="gray.400"
+            _hover={{ color: "gray.800", cursor: "pointer" }}
+          />
         </Stack>
       )}
       {variableLabels.length === 0 ? (
         <Box color="gray.500">{vars.data ? "No scenario variables configured" : "Loading..."}</Box>
       ) : (
-        <VStack spacing={1}>
+        <VStack spacing={4} flex={1} py={2}>
           {variableLabels.map((key) => {
             const value = values[key] ?? "";
             const layoutDirection = value.length > 20 ? "column" : "row";
@@ -143,31 +144,14 @@ export default function ScenarioEditor({
                 flexWrap="wrap"
                 width="full"
               >
-                <Box
-                  bgColor="blue.100"
-                  color="blue.600"
-                  px={1}
-                  my="3px"
-                  fontSize="xs"
-                  fontWeight="bold"
-                >
-                  {key}
-                </Box>
-                <AutoResizeTextArea
-                  px={2}
-                  py={1}
-                  placeholder="empty"
-                  borderRadius="sm"
-                  fontSize="sm"
-                  lineHeight={1.2}
-                  value={value}
+                <FloatingLabelInput
+                  label={key}
                   isDisabled={!canModify}
-                  _disabled={{ opacity: 1, cursor: "default" }}
+                  style={{ width: "100%" }}
+                  value={value}
                   onChange={(e) => {
                     setValues((prev) => ({ ...prev, [key]: e.target.value }));
                   }}
-                  maxH="32"
-                  overflowY="auto"
                   onKeyDown={(e) => {
                     if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) {
                       e.preventDefault();
@@ -175,12 +159,6 @@ export default function ScenarioEditor({
                       onSave();
                     }
                   }}
-                  resize="none"
-                  overflow="hidden"
-                  flex={layoutDirection === "row" ? 1 : undefined}
-                  borderColor={hasChanged ? "blue.300" : "transparent"}
-                  _hover={{ borderColor: "gray.300" }}
-                  _focus={{ borderColor: "blue.500", outline: "none", bg: "white" }}
                   onMouseEnter={() => setVariableInputHovered(true)}
                   onMouseLeave={() => setVariableInputHovered(false)}
                 />

View File

@@ -48,13 +48,11 @@ export default function VariantEditor(props: { variant: PromptVariant }) {
     if (!model) return;
     // Make sure the user defined the prompt with the string "prompt\w*=" somewhere
-    const promptRegex = /prompt\s*=/;
+    const promptRegex = /definePrompt\(/;
     if (!promptRegex.test(currentFn)) {
-      console.log("no prompt");
-      console.log(currentFn);
       toast({
         title: "Missing prompt",
-        description: "Please define the prompt (eg. `prompt = { ...`).",
+        description: "Please define the prompt (eg. `definePrompt(...`",
         status: "error",
       });
       return;

View File

@@ -4,5 +4,5 @@ export const stickyHeaderStyle: SystemStyleObject = {
   position: "sticky",
   top: "0",
   backgroundColor: "#fff",
-  zIndex: 1,
+  zIndex: 10,
 };

View File

@@ -1,44 +1,58 @@
-import { HStack, VStack } from "@chakra-ui/react";
+import { type StackProps, VStack, useBreakpointValue } from "@chakra-ui/react";
 import React from "react";
 import DiffViewer, { DiffMethod } from "react-diff-viewer";
 import Prism from "prismjs";
 import "prismjs/components/prism-javascript";
 import "prismjs/themes/prism.css"; // choose a theme you like
+const highlightSyntax = (str: string) => {
+  let highlighted;
+  try {
+    highlighted = Prism.highlight(str, Prism.languages.javascript as Prism.Grammar, "javascript");
+  } catch (e) {
+    console.error("Error highlighting:", e);
+    highlighted = str;
+  }
+  return <pre style={{ display: "inline" }} dangerouslySetInnerHTML={{ __html: highlighted }} />;
+};
 const CompareFunctions = ({
   originalFunction,
   newFunction = "",
+  leftTitle = "Original",
+  rightTitle = "Modified",
+  ...props
 }: {
   originalFunction: string;
   newFunction?: string;
-}) => {
-  console.log("newFunction", newFunction);
-  const highlightSyntax = (str: string) => {
-    let highlighted;
-    try {
-      highlighted = Prism.highlight(str, Prism.languages.javascript as Prism.Grammar, "javascript");
-    } catch (e) {
-      console.error("Error highlighting:", e);
-      highlighted = str;
-    }
-    return <pre style={{ display: "inline" }} dangerouslySetInnerHTML={{ __html: highlighted }} />;
-  };
+  leftTitle?: string;
+  rightTitle?: string;
+} & StackProps) => {
+  const showSplitView = useBreakpointValue(
+    {
+      base: false,
+      md: true,
+    },
+    {
+      fallback: "base",
+    },
+  );
   return (
-    <HStack w="full" spacing={5}>
-      <VStack w="full" spacing={4} maxH="65vh" fontSize={12} lineHeight={1} overflowY="auto">
-        <DiffViewer
-          oldValue={originalFunction}
-          newValue={newFunction || originalFunction}
-          splitView={true}
-          hideLineNumbers={true}
-          leftTitle="Original"
-          rightTitle={newFunction ? "Modified" : "Unmodified"}
-          disableWordDiff={true}
-          compareMethod={DiffMethod.CHARS}
-          renderContent={highlightSyntax}
-        />
-      </VStack>
-    </HStack>
+    <VStack w="full" spacing={4} fontSize={12} lineHeight={1} overflowY="auto" {...props}>
+      <DiffViewer
+        oldValue={originalFunction}
+        newValue={newFunction || originalFunction}
+        splitView={showSplitView}
+        hideLineNumbers={!showSplitView}
+        leftTitle={leftTitle}
+        rightTitle={rightTitle}
+        disableWordDiff={true}
+        compareMethod={DiffMethod.CHARS}
+        renderContent={highlightSyntax}
+        showDiffOnly={false}
+      />
+    </VStack>
   );
 };
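
Note: spreading ...props onto the VStack lets callers constrain the viewer through ordinary StackProps, which RefinePromptModal uses later in this diff:

    <CompareFunctions
      originalFunction={variant.constructFn}
      newFunction={isString(refinedPromptFn) ? refinedPromptFn : undefined}
      maxH="40vh"
    />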

View File

@@ -0,0 +1,74 @@
import { Button, Spinner, InputGroup, InputRightElement, Icon, HStack } from "@chakra-ui/react";
import { IoMdSend } from "react-icons/io";
import AutoResizeTextArea from "../AutoResizeTextArea";
export const CustomInstructionsInput = ({
instructions,
setInstructions,
loading,
onSubmit,
}: {
instructions: string;
setInstructions: (instructions: string) => void;
loading: boolean;
onSubmit: () => void;
}) => {
return (
<InputGroup
size="md"
w="full"
maxW="600"
boxShadow="0 0 40px 4px rgba(0, 0, 0, 0.1);"
borderRadius={8}
alignItems="center"
colorScheme="orange"
>
<AutoResizeTextArea
value={instructions}
onChange={(e) => setInstructions(e.target.value)}
onKeyDown={(e) => {
if (e.key === "Enter" && !e.metaKey && !e.ctrlKey && !e.shiftKey) {
e.preventDefault();
e.currentTarget.blur();
onSubmit();
}
}}
placeholder="Send custom instructions"
py={4}
pl={4}
pr={12}
colorScheme="orange"
borderColor="gray.300"
borderWidth={1}
_hover={{
borderColor: "gray.300",
}}
_focus={{
borderColor: "gray.300",
}}
isDisabled={loading}
/>
<InputRightElement width="8" height="full">
<Button
h="8"
w="8"
minW="unset"
size="sm"
onClick={() => onSubmit()}
variant={instructions ? "solid" : "ghost"}
mr={4}
borderRadius="8"
bgColor={instructions ? "orange.400" : "transparent"}
colorScheme="orange"
>
{loading ? (
<Spinner boxSize={4} />
) : (
<Icon as={IoMdSend} color={instructions ? "white" : "gray.500"} boxSize={5} />
)}
</Button>
</InputRightElement>
</InputGroup>
);
};

View File

@@ -0,0 +1,64 @@
import { HStack, Icon, Heading, Text, VStack, GridItem } from "@chakra-ui/react";
import { type IconType } from "react-icons";
import { refineOptions, type RefineOptionLabel } from "./refineOptions";
export const RefineOption = ({
label,
activeLabel,
icon,
onClick,
loading,
}: {
label: RefineOptionLabel;
activeLabel: RefineOptionLabel | undefined;
icon: IconType;
onClick: (label: RefineOptionLabel) => void;
loading: boolean;
}) => {
const isActive = activeLabel === label;
const description = refineOptions[label].description;
return (
<GridItem w="80" h="44">
<VStack
w="full"
h="full"
onClick={() => {
!loading && onClick(label);
}}
borderColor={isActive ? "blue.500" : "gray.200"}
borderWidth={2}
borderRadius={16}
padding={6}
backgroundColor="gray.50"
_hover={
loading
? undefined
: {
backgroundColor: "gray.100",
}
}
spacing={8}
boxShadow="0 0 40px 4px rgba(0, 0, 0, 0.1);"
cursor="pointer"
opacity={loading ? 0.5 : 1}
>
<HStack cursor="pointer" spacing={6} fontSize="sm" fontWeight="medium" color="gray.500">
<Icon as={icon} boxSize={12} />
<Heading size="md" fontFamily="inconsolata, monospace">
{label}
</Heading>
</HStack>
<Text
fontSize="sm"
color="gray.500"
flexWrap="wrap"
wordBreak="break-word"
overflowWrap="break-word"
>
{description}
</Text>
</VStack>
</GridItem>
);
};

View File

@@ -11,13 +11,21 @@ import {
   Text,
   Spinner,
   HStack,
+  Icon,
+  SimpleGrid,
 } from "@chakra-ui/react";
+import { BsStars } from "react-icons/bs";
+import { VscJson } from "react-icons/vsc";
+import { TfiThought } from "react-icons/tfi";
 import { api } from "~/utils/api";
 import { useHandledAsyncCallback } from "~/utils/hooks";
 import { type PromptVariant } from "@prisma/client";
 import { useState } from "react";
-import AutoResizeTextArea from "../AutoResizeTextArea";
 import CompareFunctions from "./CompareFunctions";
+import { CustomInstructionsInput } from "./CustomInstructionsInput";
+import { type RefineOptionLabel, refineOptions } from "./refineOptions";
+import { RefineOption } from "./RefineOption";
+import { isObject, isString } from "lodash-es";
 export const RefinePromptModal = ({
   variant,
@@ -28,22 +36,36 @@
 }) => {
   const utils = api.useContext();
-  const { mutateAsync: getRefinedPromptMutateAsync, data: refinedPromptFn } =
-    api.promptVariants.getRefinedPromptFn.useMutation();
+  const { mutateAsync: getModifiedPromptMutateAsync, data: refinedPromptFn } =
+    api.promptVariants.getModifiedPromptFn.useMutation();
   const [instructions, setInstructions] = useState<string>("");
-  const [getRefinedPromptFn, refiningInProgress] = useHandledAsyncCallback(async () => {
-    if (!variant.experimentId) return;
-    await getRefinedPromptMutateAsync({
-      id: variant.id,
-      instructions,
-    });
-  }, [getRefinedPromptMutateAsync, onClose, variant, instructions]);
+  const [activeRefineOptionLabel, setActiveRefineOptionLabel] = useState<
+    RefineOptionLabel | undefined
+  >(undefined);
+  const [getModifiedPromptFn, modificationInProgress] = useHandledAsyncCallback(
+    async (label?: RefineOptionLabel) => {
+      if (!variant.experimentId) return;
+      const updatedInstructions = label ? refineOptions[label].instructions : instructions;
+      setActiveRefineOptionLabel(label);
+      await getModifiedPromptMutateAsync({
+        id: variant.id,
+        instructions: updatedInstructions,
+      });
+    },
+    [getModifiedPromptMutateAsync, onClose, variant, instructions, setActiveRefineOptionLabel],
+  );
   const replaceVariantMutation = api.promptVariants.replaceVariant.useMutation();
   const [replaceVariant, replacementInProgress] = useHandledAsyncCallback(async () => {
-    if (!variant.experimentId || !refinedPromptFn) return;
+    if (
+      !variant.experimentId ||
+      !refinedPromptFn ||
+      (isObject(refinedPromptFn) && "status" in refinedPromptFn)
+    )
+      return;
     await replaceVariantMutation.mutateAsync({
       id: variant.id,
       constructFn: refinedPromptFn,
@@ -53,45 +75,64 @@
   }, [replaceVariantMutation, variant, onClose, refinedPromptFn]);
   return (
-    <Modal isOpen onClose={onClose} size={{ base: "xl", sm: "2xl", md: "7xl" }}>
+    <Modal
+      isOpen
+      onClose={onClose}
+      size={{ base: "xl", sm: "2xl", md: "3xl", lg: "5xl", xl: "7xl" }}
+    >
       <ModalOverlay />
       <ModalContent w={1200}>
-        <ModalHeader>Refine Your Prompt</ModalHeader>
+        <ModalHeader>
+          <HStack>
+            <Icon as={BsStars} />
+            <Text>Refine with GPT-4</Text>
+          </HStack>
+        </ModalHeader>
         <ModalCloseButton />
         <ModalBody maxW="unset">
           <VStack spacing={8}>
-            <HStack w="full">
-              <AutoResizeTextArea
-                value={instructions}
-                onChange={(e) => setInstructions(e.target.value)}
-                onKeyDown={(e) => {
-                  if (e.key === "Enter" && !e.metaKey && !e.ctrlKey && !e.shiftKey) {
-                    e.preventDefault();
-                    e.currentTarget.blur();
-                    getRefinedPromptFn();
-                  }
-                }}
-                placeholder="Use chain of thought"
-              />
-              <Button onClick={getRefinedPromptFn}>
-                {refiningInProgress ? <Spinner boxSize={4} /> : <Text>Submit</Text>}
-              </Button>
-            </HStack>
+            <VStack spacing={4}>
+              <SimpleGrid columns={{ base: 1, md: 2 }} spacing={8}>
+                <RefineOption
+                  label="Convert to function call"
+                  activeLabel={activeRefineOptionLabel}
+                  icon={VscJson}
+                  onClick={getModifiedPromptFn}
+                  loading={modificationInProgress}
+                />
+                <RefineOption
+                  label="Add chain of thought"
+                  activeLabel={activeRefineOptionLabel}
+                  icon={TfiThought}
+                  onClick={getModifiedPromptFn}
+                  loading={modificationInProgress}
+                />
+              </SimpleGrid>
+              <HStack>
+                <Text color="gray.500">or</Text>
+              </HStack>
+              <CustomInstructionsInput
+                instructions={instructions}
+                setInstructions={setInstructions}
+                loading={modificationInProgress}
+                onSubmit={getModifiedPromptFn}
+              />
+            </VStack>
             <CompareFunctions
               originalFunction={variant.constructFn}
-              newFunction={refinedPromptFn}
+              newFunction={isString(refinedPromptFn) ? refinedPromptFn : undefined}
+              maxH="40vh"
             />
           </VStack>
         </ModalBody>
         <ModalFooter>
           <HStack spacing={4}>
+            <Button onClick={onClose}>Cancel</Button>
             <Button
               colorScheme="blue"
               onClick={replaceVariant}
               minW={24}
-              disabled={!refinedPromptFn}
+              isDisabled={replacementInProgress || !refinedPromptFn}
            >
              {replacementInProgress ? <Spinner boxSize={4} /> : <Text>Accept</Text>}
            </Button>

View File

@@ -0,0 +1,237 @@
// Super hacky, but we'll redo the organization when we have more models
export type RefineOptionLabel = "Add chain of thought" | "Convert to function call";
export const refineOptions: Record<
RefineOptionLabel,
{ description: string; instructions: string }
> = {
"Add chain of thought": {
description: "Asking the model to plan its answer can increase accuracy.",
instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message.
This is what a prompt looks like before adding chain of thought:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: \`Evaluate sentiment.\`,
},
{
role: "user",
content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
},
],
});
This is what one looks like after adding chain of thought:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: \`Evaluate sentiment.\`,
},
{
role: "user",
content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`,
},
],
});
Here's another example:
Before:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Title: \${scenario.title}
Body: \${scenario.body}
Need: \${scenario.need}
Rate likelihood on 1-3 scale.\`,
},
],
temperature: 0,
functions: [
{
name: "score_post",
parameters: {
type: "object",
properties: {
score: {
type: "number",
},
},
},
},
],
function_call: {
name: "score_post",
},
});
After:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Title: \${scenario.title}
Body: \${scenario.body}
Need: \${scenario.need}
Rate likelihood on 1-3 scale. Provide an explanation, but always provide a score afterward.\`,
},
],
temperature: 0,
functions: [
{
name: "score_post",
parameters: {
type: "object",
properties: {
explanation: {
type: "string",
},
score: {
type: "number",
},
},
},
},
],
function_call: {
name: "score_post",
},
});
Add chain of thought to the original prompt.`,
},
"Convert to function call": {
description: "Use function calls to get output from the model in a more structured way.",
instructions: `OpenAI functions are a specialized way for an LLM to return output.
This is what a prompt looks like before adding a function:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: \`Evaluate sentiment.\`,
},
{
role: "user",
content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
},
],
});
This is what one looks like after adding a function:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: "Evaluate sentiment.",
},
{
role: "user",
content: scenario.user_message,
},
],
functions: [
{
name: "extract_sentiment",
parameters: {
type: "object", // parameters must always be an object with a properties key
properties: { // properties key is required
sentiment: {
type: "string",
description: "one of positive/negative/neutral",
},
},
},
},
],
function_call: {
name: "extract_sentiment",
},
});
Here's another example of adding a function:
Before:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Here is the title and body of a reddit post I am interested in:
title: \${scenario.title}
body: \${scenario.body}
On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1.
Need: \${scenario.need}
Answer one integer between 1 and 3.\`,
},
],
temperature: 0,
});
After:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Title: \${scenario.title}
Body: \${scenario.body}
Need: \${scenario.need}
Rate likelihood on 1-3 scale.\`,
},
],
temperature: 0,
functions: [
{
name: "score_post",
parameters: {
type: "object",
properties: {
score: {
type: "number",
},
},
},
},
],
function_call: {
name: "score_post",
},
});
Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
},
};

View File

@@ -7,7 +7,7 @@ import {
   SimpleGrid,
   Link,
 } from "@chakra-ui/react";
-import { modelStats } from "~/server/modelStats";
+import { modelStats } from "~/modelProviders/modelStats";
 import { type SupportedModel } from "~/server/types";
 export const ModelStatsCard = ({ label, model }: { label: string; model: SupportedModel }) => {

View File

@@ -10,46 +10,78 @@ import {
   VStack,
   Text,
   Spinner,
+  HStack,
+  Icon,
 } from "@chakra-ui/react";
+import { RiExchangeFundsFill } from "react-icons/ri";
 import { useState } from "react";
 import { type SupportedModel } from "~/server/types";
 import { ModelStatsCard } from "./ModelStatsCard";
 import { SelectModelSearch } from "./SelectModelSearch";
 import { api } from "~/utils/api";
 import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
+import CompareFunctions from "../RefinePromptModal/CompareFunctions";
+import { type PromptVariant } from "@prisma/client";
+import { isObject, isString } from "lodash-es";
 export const SelectModelModal = ({
-  originalModel,
-  variantId,
+  variant,
   onClose,
 }: {
-  originalModel: SupportedModel;
-  variantId: string;
+  variant: PromptVariant;
   onClose: () => void;
 }) => {
+  const originalModel = variant.model as SupportedModel;
   const [selectedModel, setSelectedModel] = useState<SupportedModel>(originalModel);
+  const [convertedModel, setConvertedModel] = useState<SupportedModel | undefined>(undefined);
   const utils = api.useContext();
   const experiment = useExperiment();
-  const createMutation = api.promptVariants.create.useMutation();
-  const [createNewVariant, creationInProgress] = useHandledAsyncCallback(async () => {
-    if (!experiment?.data?.id) return;
-    await createMutation.mutateAsync({
-      experimentId: experiment?.data?.id,
-      variantId,
+  const { mutateAsync: getModifiedPromptMutateAsync, data: modifiedPromptFn } =
+    api.promptVariants.getModifiedPromptFn.useMutation();
+  const [getModifiedPromptFn, modificationInProgress] = useHandledAsyncCallback(async () => {
+    if (!experiment) return;
+    await getModifiedPromptMutateAsync({
+      id: variant.id,
       newModel: selectedModel,
     });
+    setConvertedModel(selectedModel);
+  }, [getModifiedPromptMutateAsync, onClose, experiment, variant, selectedModel]);
+  const replaceVariantMutation = api.promptVariants.replaceVariant.useMutation();
+  const [replaceVariant, replacementInProgress] = useHandledAsyncCallback(async () => {
+    if (
+      !variant.experimentId ||
+      !modifiedPromptFn ||
+      (isObject(modifiedPromptFn) && "status" in modifiedPromptFn)
+    )
+      return;
+    await replaceVariantMutation.mutateAsync({
+      id: variant.id,
+      constructFn: modifiedPromptFn,
+    });
     await utils.promptVariants.list.invalidate();
     onClose();
-  }, [createMutation, experiment?.data?.id, variantId, onClose]);
+  }, [replaceVariantMutation, variant, onClose, modifiedPromptFn]);
   return (
-    <Modal isOpen onClose={onClose} size={{ base: "xl", sm: "2xl", md: "3xl" }}>
+    <Modal
+      isOpen
+      onClose={onClose}
+      size={{ base: "xl", sm: "2xl", md: "3xl", lg: "5xl", xl: "7xl" }}
+    >
       <ModalOverlay />
       <ModalContent w={1200}>
-        <ModalHeader>Select a New Model</ModalHeader>
+        <ModalHeader>
+          <HStack>
+            <Icon as={RiExchangeFundsFill} />
+            <Text>Change Model</Text>
+          </HStack>
+        </ModalHeader>
         <ModalCloseButton />
         <ModalBody maxW="unset">
           <VStack spacing={8}>
@@ -58,18 +90,36 @@ export const SelectModelModal = ({
               <ModelStatsCard label="New Model" model={selectedModel} />
             )}
             <SelectModelSearch selectedModel={selectedModel} setSelectedModel={setSelectedModel} />
+            {isString(modifiedPromptFn) && (
+              <CompareFunctions
+                originalFunction={variant.constructFn}
+                newFunction={modifiedPromptFn}
+                leftTitle={originalModel}
+                rightTitle={convertedModel}
+              />
+            )}
           </VStack>
         </ModalBody>
         <ModalFooter>
-          <Button
-            colorScheme="blue"
-            onClick={createNewVariant}
-            minW={24}
-            disabled={originalModel === selectedModel}
-          >
-            {creationInProgress ? <Spinner boxSize={4} /> : <Text>Continue</Text>}
-          </Button>
+          <HStack>
+            <Button
+              colorScheme="gray"
+              onClick={getModifiedPromptFn}
+              minW={24}
+              isDisabled={originalModel === selectedModel || modificationInProgress}
+            >
+              {modificationInProgress ? <Spinner boxSize={4} /> : <Text>Convert</Text>}
+            </Button>
+            <Button
+              colorScheme="blue"
+              onClick={replaceVariant}
+              minW={24}
+              isDisabled={!convertedModel || modificationInProgress || replacementInProgress}
+            >
+              {replacementInProgress ? <Spinner boxSize={4} /> : <Text>Accept</Text>}
+            </Button>
+          </HStack>
         </ModalFooter>
       </ModalContent>
     </Modal>

View File

@@ -12,14 +12,12 @@ import {
   Text,
   Spinner,
 } from "@chakra-ui/react";
-import { BsFillTrashFill, BsGear } from "react-icons/bs";
+import { BsFillTrashFill, BsGear, BsStars } from "react-icons/bs";
 import { FaRegClone } from "react-icons/fa";
-import { AiOutlineDiff } from "react-icons/ai";
 import { useState } from "react";
 import { RefinePromptModal } from "../RefinePromptModal/RefinePromptModal";
 import { RiExchangeFundsFill } from "react-icons/ri";
 import { SelectModelModal } from "../SelectModelModal/SelectModelModal";
-import { type SupportedModel } from "~/server/types";
 export default function VariantHeaderMenuButton({
   variant,
@@ -79,7 +77,7 @@
         Change Model
       </MenuItem>
       <MenuItem
-        icon={<Icon as={AiOutlineDiff} boxSize={5} />}
+        icon={<Icon as={BsStars} boxSize={5} />}
         onClick={() => setRefinePromptModalOpen(true)}
       >
         Refine
@@ -100,11 +98,7 @@
       </MenuList>
     </Menu>
     {selectModelModalOpen && (
-      <SelectModelModal
-        originalModel={variant.model as SupportedModel}
-        variantId={variant.id}
-        onClose={() => setSelectModelModalOpen(false)}
-      />
+      <SelectModelModal variant={variant} onClose={() => setSelectModelModalOpen(false)} />
     )}
     {refinePromptModalOpen && (
       <RefinePromptModal variant={variant} onClose={() => setRefinePromptModalOpen(false)} />

View File

@@ -17,6 +17,7 @@ export const env = createEnv({
       .transform((val) => val.toLowerCase() === "true"),
     GITHUB_CLIENT_ID: z.string().min(1),
     GITHUB_CLIENT_SECRET: z.string().min(1),
+    REPLICATE_API_TOKEN: z.string().min(1),
   },
   /**
@@ -42,6 +43,7 @@ export const env = createEnv({
     NEXT_PUBLIC_SOCKET_URL: process.env.NEXT_PUBLIC_SOCKET_URL,
     GITHUB_CLIENT_ID: process.env.GITHUB_CLIENT_ID,
     GITHUB_CLIENT_SECRET: process.env.GITHUB_CLIENT_SECRET,
+    REPLICATE_API_TOKEN: process.env.REPLICATE_API_TOKEN,
   },
   /**
    * Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation.
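
Note: with the schema entry above, server code gets a validated, typed token. A minimal sketch, assuming the usual create-t3-app env module path and the official replicate client (both assumptions, not shown in this diff):

    import Replicate from "replicate";
    import { env } from "~/env.mjs";

    // env.REPLICATE_API_TOKEN fails fast at startup if unset, thanks to the zod schema above.
    const replicate = new Replicate({ auth: env.REPLICATE_API_TOKEN });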

View File

@@ -0,0 +1,15 @@
import openaiChatCompletionFrontend from "./openai-ChatCompletion/frontend";
import replicateLlama2Frontend from "./replicate-llama2/frontend";
import { type SupportedProvider, type FrontendModelProvider } from "./types";
// TODO: make sure we get a typescript error if you forget to add a provider here
// Keep attributes here that need to be accessible from the frontend. We can't
// just include them in the default `modelProviders` object because it has some
// transient dependencies that can only be imported on the server.
const frontendModelProviders: Record<SupportedProvider, FrontendModelProvider<any, any>> = {
"openai/ChatCompletion": openaiChatCompletionFrontend,
"replicate/llama2": replicateLlama2Frontend,
};
export default frontendModelProviders;
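
Note: one way to get the compile-time error the TODO asks for is to swap the Record annotation for a satisfies check (TypeScript 4.9+), which still rejects a missing provider key but keeps the narrow per-provider types. A sketch, not a committed approach:

    const frontendModelProviders = {
      "openai/ChatCompletion": openaiChatCompletionFrontend,
      "replicate/llama2": replicateLlama2Frontend,
      // Omitting a SupportedProvider key is now a type error at this declaration.
    } satisfies Record<SupportedProvider, FrontendModelProvider<any, any>>;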

View File

@@ -0,0 +1,36 @@
import { type JSONSchema4Object } from "json-schema";
import modelProviders from "./modelProviders";
import { compile } from "json-schema-to-typescript";
import dedent from "dedent";
export default async function generateTypes() {
const combinedSchema = {
type: "object",
properties: {} as Record<string, JSONSchema4Object>,
};
Object.entries(modelProviders).forEach(([id, provider]) => {
combinedSchema.properties[id] = provider.inputSchema;
});
const promptTypes = (
await compile(combinedSchema as JSONSchema4Object, "PromptTypes", {
additionalProperties: false,
bannerComment: dedent`
/**
* This type map defines the input types for each model provider.
*/
`,
})
).replace(/export interface PromptTypes/g, "interface PromptTypes");
return dedent`
${promptTypes}
declare function definePrompt<T extends keyof PromptTypes>(modelProvider: T, input: PromptTypes[T])
`;
}
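
Note: the generated artifact itself is not checked in here, but given the compile() call and the trailing declare, the editor-facing output should look roughly like this (member types abbreviated and hypothetical):

    /**
     * This type map defines the input types for each model provider.
     */
    interface PromptTypes {
      "openai/ChatCompletion": { model: string; messages: unknown[] /* ... */ };
      "replicate/llama2": { /* ... */ };
    }
    declare function definePrompt<T extends keyof PromptTypes>(modelProvider: T, input: PromptTypes[T])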

View File

@@ -0,0 +1,10 @@
import openaiChatCompletion from "./openai-ChatCompletion";
import replicateLlama2 from "./replicate-llama2";
import { type SupportedProvider, type ModelProvider } from "./types";
const modelProviders: Record<SupportedProvider, ModelProvider<any, any, any>> = {
"openai/ChatCompletion": openaiChatCompletion,
"replicate/llama2": replicateLlama2,
};
export default modelProviders;

View File

@@ -1,4 +1,4 @@
-import { type SupportedModel } from "./types";
+import { type SupportedModel } from "../server/types";
 interface ModelStats {
   contextLength: number;

View File

@@ -0,0 +1,77 @@
/* eslint-disable @typescript-eslint/no-var-requires */
import YAML from "yaml";
import fs from "fs";
import path from "path";
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
import $RefParser from "@apidevtools/json-schema-ref-parser";
import { type JSONObject } from "superjson/dist/types";
import assert from "assert";
import { type JSONSchema4Object } from "json-schema";
import { isObject } from "lodash-es";
// @ts-expect-error for some reason missing from types
import parserEstree from "prettier/plugins/estree";
import parserBabel from "prettier/plugins/babel";
import prettier from "prettier/standalone";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
// Fetch the openapi document
const response = await fetch(OPENAPI_URL);
const openApiYaml = await response.text();
// Parse the yaml document
let schema = YAML.parse(openApiYaml) as JSONObject;
schema = openapiSchemaToJsonSchema(schema);
const jsonSchema = await $RefParser.dereference(schema);
assert("components" in jsonSchema);
const completionRequestSchema = jsonSchema.components.schemas
.CreateChatCompletionRequest as JSONSchema4Object;
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
assert(
"properties" in completionRequestSchema &&
isObject(completionRequestSchema.properties) &&
"model" in completionRequestSchema.properties &&
isObject(completionRequestSchema.properties.model),
);
const modelProperty = completionRequestSchema.properties.model;
assert(
"oneOf" in modelProperty &&
Array.isArray(modelProperty.oneOf) &&
modelProperty.oneOf.length === 2 &&
isObject(modelProperty.oneOf[1]) &&
"enum" in modelProperty.oneOf[1],
"Expected model to have oneOf length of 2",
);
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
delete modelProperty["oneOf"];
// The default of "inf" confuses the TypeScript generator, so we can just remove it
assert(
"max_tokens" in completionRequestSchema.properties &&
isObject(completionRequestSchema.properties.max_tokens) &&
"default" in completionRequestSchema.properties.max_tokens,
);
delete completionRequestSchema.properties.max_tokens["default"];
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "input.schema.json"),
await prettier.format(JSON.stringify(completionRequestSchema, null, 2), {
parser: "json",
plugins: [parserBabel, parserEstree],
}),
);
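To make the surgery concrete, this is roughly the shape of the `model` property before and after the transform (enum values abridged):

// Before (upstream OpenAPI schema): Monaco warns about the string/enum oneOf.
const before = {
  oneOf: [
    { type: "string" },
    { type: "string", enum: ["gpt-4", "gpt-3.5-turbo" /* ... */] },
  ],
};

// After: a plain string enum that the editor accepts without warnings.
const after = {
  type: "string",
  enum: ["gpt-4", "gpt-3.5-turbo" /* ... */],
};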

View File

@@ -0,0 +1,185 @@
{
"type": "object",
"properties": {
"model": {
"description": "ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.",
"example": "gpt-3.5-turbo",
"type": "string",
"enum": [
"gpt-4",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613"
]
},
"messages": {
"description": "A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"role": {
"type": "string",
"enum": ["system", "user", "assistant", "function"],
"description": "The role of the messages author. One of `system`, `user`, `assistant`, or `function`."
},
"content": {
"type": "string",
"description": "The contents of the message. `content` is required for all messages except assistant messages with function calls."
},
"name": {
"type": "string",
"description": "The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters."
},
"function_call": {
"type": "object",
"description": "The name and arguments of a function that should be called, as generated by the model.",
"properties": {
"name": {
"type": "string",
"description": "The name of the function to call."
},
"arguments": {
"type": "string",
"description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."
}
}
}
},
"required": ["role"]
}
},
"functions": {
"description": "A list of functions the model may generate JSON inputs for.",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."
},
"description": {
"type": "string",
"description": "The description of what the function does."
},
"parameters": {
"type": "object",
"description": "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.",
"additionalProperties": true
}
},
"required": ["name"]
}
},
"function_call": {
"description": "Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.",
"oneOf": [
{
"type": "string",
"enum": ["none", "auto"]
},
{
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the function to call."
}
},
"required": ["name"]
}
]
},
"temperature": {
"type": "number",
"minimum": 0,
"maximum": 2,
"default": 1,
"example": 1,
"nullable": true,
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n"
},
"top_p": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
"example": 1,
"nullable": true,
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n"
},
"n": {
"type": "integer",
"minimum": 1,
"maximum": 128,
"default": 1,
"example": 1,
"nullable": true,
"description": "How many chat completion choices to generate for each input message."
},
"stream": {
"description": "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).\n",
"type": "boolean",
"nullable": true,
"default": false
},
"stop": {
"description": "Up to 4 sequences where the API will stop generating further tokens.\n",
"default": null,
"oneOf": [
{
"type": "string",
"nullable": true
},
{
"type": "array",
"minItems": 1,
"maxItems": 4,
"items": {
"type": "string"
}
}
]
},
"max_tokens": {
"description": "The maximum number of [tokens](/tokenizer) to generate in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.\n",
"type": "integer"
},
"presence_penalty": {
"type": "number",
"default": 0,
"minimum": -2,
"maximum": 2,
"nullable": true,
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n"
},
"frequency_penalty": {
"type": "number",
"default": 0,
"minimum": -2,
"maximum": 2,
"nullable": true,
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n"
},
"logit_bias": {
"type": "object",
"x-oaiTypeLabel": "map",
"default": null,
"nullable": true,
"description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n"
},
"user": {
"type": "string",
"example": "user-1234",
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": ["model", "messages"]
}

View File

@@ -0,0 +1,64 @@
import { type JsonValue } from "type-fest";
import { type SupportedModel } from ".";
import { type FrontendModelProvider } from "../types";
import { type ChatCompletion } from "openai/resources/chat";
const frontendModelProvider: FrontendModelProvider<SupportedModel, ChatCompletion> = {
name: "OpenAI ChatCompletion",
models: {
"gpt-4-0613": {
name: "GPT-4",
learnMore: "https://openai.com/gpt-4",
},
"gpt-4-32k-0613": {
name: "GPT-4 32k",
learnMore: "https://openai.com/gpt-4",
},
"gpt-3.5-turbo-0613": {
name: "GPT-3.5 Turbo",
learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
},
"gpt-3.5-turbo-16k-0613": {
name: "GPT-3.5 Turbo 16k",
learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
},
},
normalizeOutput: (output) => {
const message = output.choices[0]?.message;
if (!message)
return {
type: "json",
value: output as unknown as JsonValue,
};
if (message.content) {
return {
type: "text",
value: message.content,
};
} else if (message.function_call) {
let args = message.function_call.arguments ?? "";
try {
args = JSON.parse(args);
} catch (e) {
// Ignore
}
return {
type: "json",
value: {
...message.function_call,
arguments: args,
},
};
} else {
return {
type: "json",
value: message as unknown as JsonValue,
};
}
},
};
export default frontendModelProvider;
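Roughly how normalizeOutput behaves on the two common completion shapes (example payloads abridged, hence the `as any` casts):

import frontendModelProvider from "./frontend";

// Plain text message -> text output
frontendModelProvider.normalizeOutput({
  choices: [{ index: 0, message: { role: "assistant", content: "Hi!" } }],
} as any);
// => { type: "text", value: "Hi!" }

// Function call -> json output, with arguments parsed when they are valid JSON
frontendModelProvider.normalizeOutput({
  choices: [
    { index: 0, message: { role: "assistant", function_call: { name: "f", arguments: '{"x":1}' } } },
  ],
} as any);
// => { type: "json", value: { name: "f", arguments: { x: 1 } } }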

View File

@@ -0,0 +1,142 @@
/* eslint-disable @typescript-eslint/no-unsafe-call */
import {
type ChatCompletionChunk,
type ChatCompletion,
type CompletionCreateParams,
} from "openai/resources/chat";
import { countOpenAIChatTokens } from "~/utils/countTokens";
import { type CompletionResponse } from "../types";
import { omit } from "lodash-es";
import { openai } from "~/server/utils/openai";
import { type OpenAIChatModel } from "~/server/types";
import { truthyFilter } from "~/utils/utils";
import { APIError } from "openai";
import { modelStats } from "../modelStats";
const mergeStreamedChunks = (
base: ChatCompletion | null,
chunk: ChatCompletionChunk,
): ChatCompletion => {
if (base === null) {
return mergeStreamedChunks({ ...chunk, choices: [] }, chunk);
}
const choices = [...base.choices];
for (const choice of chunk.choices) {
const baseChoice = choices.find((c) => c.index === choice.index);
if (baseChoice) {
baseChoice.finish_reason = choice.finish_reason ?? baseChoice.finish_reason;
baseChoice.message = baseChoice.message ?? { role: "assistant" };
if (choice.delta?.content)
baseChoice.message.content =
((baseChoice.message.content as string) ?? "") + (choice.delta.content ?? "");
if (choice.delta?.function_call) {
const fnCall = baseChoice.message.function_call ?? {};
fnCall.name =
((fnCall.name as string) ?? "") + ((choice.delta.function_call.name as string) ?? "");
fnCall.arguments =
((fnCall.arguments as string) ?? "") +
((choice.delta.function_call.arguments as string) ?? "");
}
} else {
choices.push({ ...omit(choice, "delta"), message: { role: "assistant", ...choice.delta } });
}
}
const merged: ChatCompletion = {
...base,
choices,
};
return merged;
};
export async function getCompletion(
input: CompletionCreateParams,
onStream: ((partialOutput: ChatCompletion) => void) | null,
): Promise<CompletionResponse<ChatCompletion>> {
const start = Date.now();
let finalCompletion: ChatCompletion | null = null;
let promptTokens: number | undefined = undefined;
let completionTokens: number | undefined = undefined;
try {
if (onStream) {
const resp = await openai.chat.completions.create(
{ ...input, stream: true },
{
maxRetries: 0,
},
);
for await (const part of resp) {
finalCompletion = mergeStreamedChunks(finalCompletion, part);
onStream(finalCompletion);
}
if (!finalCompletion) {
return {
type: "error",
message: "Streaming failed to return a completion",
autoRetry: false,
};
}
try {
promptTokens = countOpenAIChatTokens(
input.model as keyof typeof OpenAIChatModel,
input.messages,
);
completionTokens = countOpenAIChatTokens(
input.model as keyof typeof OpenAIChatModel,
finalCompletion.choices.map((c) => c.message).filter(truthyFilter),
);
} catch (err) {
// TODO: handle this; the token-counting library may not support function calls
console.error(err);
}
} else {
const resp = await openai.chat.completions.create(
{ ...input, stream: false },
{
maxRetries: 0,
},
);
finalCompletion = resp;
promptTokens = resp.usage?.prompt_tokens ?? 0;
completionTokens = resp.usage?.completion_tokens ?? 0;
}
const timeToComplete = Date.now() - start;
const stats = modelStats[input.model as keyof typeof OpenAIChatModel];
let cost = undefined;
if (stats && promptTokens && completionTokens) {
cost = promptTokens * stats.promptTokenPrice + completionTokens * stats.completionTokenPrice;
}
return {
type: "success",
statusCode: 200,
value: finalCompletion,
timeToComplete,
promptTokens,
completionTokens,
cost,
};
} catch (error: unknown) {
console.error("ERROR IS", error);
if (error instanceof APIError) {
return {
type: "error",
message: error.message,
autoRetry: error.status === 429 || error.status === 503,
statusCode: error.status,
};
} else {
console.error(error);
return {
type: "error",
message: (error as Error).message,
autoRetry: true,
};
}
}
}
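As a sanity check on mergeStreamedChunks, two deltas for the same choice index accumulate into a single message (hypothetical chunks, abridged to the relevant fields):

// chunk1: { choices: [{ index: 0, delta: { role: "assistant", content: "Hel" } }] }
// chunk2: { choices: [{ index: 0, delta: { content: "lo" } }] }
//
// mergeStreamedChunks(null, chunk1) seeds the base and pushes a message
// { role: "assistant", content: "Hel" }; merging chunk2 into that base then
// appends the delta content, yielding:
// { choices: [{ index: 0, message: { role: "assistant", content: "Hello" } }] }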

View File

@@ -0,0 +1,45 @@
import { type JSONSchema4 } from "json-schema";
import { type ModelProvider } from "../types";
import inputSchema from "./codegen/input.schema.json";
import { type ChatCompletion, type CompletionCreateParams } from "openai/resources/chat";
import { getCompletion } from "./getCompletion";
import frontendModelProvider from "./frontend";
const supportedModels = [
"gpt-4-0613",
"gpt-4-32k-0613",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
] as const;
export type SupportedModel = (typeof supportedModels)[number];
export type OpenaiChatModelProvider = ModelProvider<
SupportedModel,
CompletionCreateParams,
ChatCompletion
>;
const modelProvider: OpenaiChatModelProvider = {
getModel: (input) => {
if (supportedModels.includes(input.model as SupportedModel))
return input.model as SupportedModel;
const modelMaps: Record<string, SupportedModel> = {
"gpt-4": "gpt-4-0613",
"gpt-4-32k": "gpt-4-32k-0613",
"gpt-3.5-turbo": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
};
if (input.model in modelMaps) return modelMaps[input.model] as SupportedModel;
return null;
},
inputSchema: inputSchema as JSONSchema4,
shouldStream: (input) => input.stream ?? false,
getCompletion,
...frontendModelProvider,
};
export default modelProvider;
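The getModel mapping in practice (a sketch, not part of the diff):

modelProvider.getModel({ model: "gpt-4", messages: [] });            // "gpt-4-0613" via modelMaps
modelProvider.getModel({ model: "gpt-4-32k-0613", messages: [] });   // already pinned, passes through
modelProvider.getModel({ model: "text-davinci-003", messages: [] }); // null: not a supported chat model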

View File

@@ -0,0 +1,21 @@
import { type SupportedModel, type ReplicateLlama2Output } from ".";
import { type FrontendModelProvider } from "../types";
const frontendModelProvider: FrontendModelProvider<SupportedModel, ReplicateLlama2Output> = {
name: "Replicate Llama2",
models: {
"7b-chat": {},
"13b-chat": {},
"70b-chat": {},
},
normalizeOutput: (output) => {
return {
type: "text",
value: output.join(""),
};
},
};
export default frontendModelProvider;

View File

@@ -0,0 +1,62 @@
import { env } from "~/env.mjs";
import { type ReplicateLlama2Input, type ReplicateLlama2Output } from ".";
import { type CompletionResponse } from "../types";
import Replicate from "replicate";
const replicate = new Replicate({
auth: env.REPLICATE_API_TOKEN || "",
});
const modelIds: Record<ReplicateLlama2Input["model"], string> = {
"7b-chat": "3725a659b5afff1a0ba9bead5fac3899d998feaad00e07032ca2b0e35eb14f8a",
"13b-chat": "5c785d117c5bcdd1928d5a9acb1ffa6272d6cf13fcb722e90886a0196633f9d3",
"70b-chat": "e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48",
};
export async function getCompletion(
input: ReplicateLlama2Input,
onStream: ((partialOutput: string[]) => void) | null,
): Promise<CompletionResponse<ReplicateLlama2Output>> {
const start = Date.now();
const { model, stream, ...rest } = input;
try {
const prediction = await replicate.predictions.create({
version: modelIds[model],
input: rest,
});
console.log("stream?", onStream);
const interval = onStream
? // eslint-disable-next-line @typescript-eslint/no-misused-promises
setInterval(async () => {
const partialPrediction = await replicate.predictions.get(prediction.id);
if (partialPrediction.output) onStream(partialPrediction.output as ReplicateLlama2Output);
}, 500)
: null;
const resp = await replicate.wait(prediction, {});
if (interval) clearInterval(interval);
const timeToComplete = Date.now() - start;
if (resp.error) throw new Error(resp.error as string);
return {
type: "success",
statusCode: 200,
value: resp.output as ReplicateLlama2Output,
timeToComplete,
};
} catch (error: unknown) {
console.error("ERROR IS", error);
return {
type: "error",
message: (error as Error).message,
autoRetry: true,
};
}
}

View File

@@ -0,0 +1,70 @@
import { type ModelProvider } from "../types";
import frontendModelProvider from "./frontend";
import { getCompletion } from "./getCompletion";
const supportedModels = ["7b-chat", "13b-chat", "70b-chat"] as const;
export type SupportedModel = (typeof supportedModels)[number];
export type ReplicateLlama2Input = {
model: SupportedModel;
prompt: string;
stream?: boolean;
max_length?: number;
temperature?: number;
top_p?: number;
repetition_penalty?: number;
debug?: boolean;
};
export type ReplicateLlama2Output = string[];
export type ReplicateLlama2Provider = ModelProvider<
SupportedModel,
ReplicateLlama2Input,
ReplicateLlama2Output
>;
const modelProvider: ReplicateLlama2Provider = {
getModel: (input) => {
if (supportedModels.includes(input.model)) return input.model;
return null;
},
inputSchema: {
type: "object",
properties: {
model: {
type: "string",
enum: supportedModels as unknown as string[],
},
prompt: {
type: "string",
},
stream: {
type: "boolean",
},
max_length: {
type: "number",
},
temperature: {
type: "number",
},
top_p: {
type: "number",
},
repetition_penalty: {
type: "number",
},
debug: {
type: "boolean",
},
},
required: ["model", "prompt"],
},
shouldStream: (input) => input.stream ?? false,
getCompletion,
...frontendModelProvider,
};
export default modelProvider;
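Combined with the prompt DSL, a Llama 2 variant would look roughly like this inside a constructor function (the values are illustrative; `scenario` is provided by the surrounding sandbox):

definePrompt("replicate/llama2", {
  model: "13b-chat",
  prompt: `Write 'Start experimenting!' in ${scenario.language}`,
  temperature: 0.7,
  max_length: 500,
});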

View File

@@ -0,0 +1,51 @@
import { type JSONSchema4 } from "json-schema";
import { type JsonValue } from "type-fest";
export type SupportedProvider = "openai/ChatCompletion" | "replicate/llama2";
type ModelInfo = {
name?: string;
learnMore?: string;
};
export type FrontendModelProvider<SupportedModels extends string, OutputSchema> = {
name: string;
models: Record<SupportedModels, ModelInfo>;
normalizeOutput: (output: OutputSchema) => NormalizedOutput;
};
export type CompletionResponse<T> =
| { type: "error"; message: string; autoRetry: boolean; statusCode?: number }
| {
type: "success";
value: T;
timeToComplete: number;
statusCode: number;
promptTokens?: number;
completionTokens?: number;
cost?: number;
};
export type ModelProvider<SupportedModels extends string, InputSchema, OutputSchema> = {
getModel: (input: InputSchema) => SupportedModels | null;
shouldStream: (input: InputSchema) => boolean;
inputSchema: JSONSchema4;
getCompletion: (
input: InputSchema,
onStream: ((partialOutput: OutputSchema) => void) | null,
) => Promise<CompletionResponse<OutputSchema>>;
// This is just a convenience for type inference; don't use it at runtime
_outputSchema?: OutputSchema | null;
} & FrontendModelProvider<SupportedModels, OutputSchema>;
export type NormalizedOutput =
| {
type: "text";
value: string;
}
| {
type: "json";
value: JsonValue;
};
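For reference, the smallest thing that satisfies ModelProvider looks something like this echo provider, invented purely for illustration:

import { type ModelProvider } from "./types";

type EchoInput = { model: "echo-1"; text: string };

const echoProvider: ModelProvider<"echo-1", EchoInput, string> = {
  name: "Echo",
  models: { "echo-1": {} },
  getModel: (input) => (input.model === "echo-1" ? "echo-1" : null),
  shouldStream: () => false,
  inputSchema: {
    type: "object",
    properties: {
      model: { type: "string", enum: ["echo-1"] },
      text: { type: "string" },
    },
    required: ["model", "text"],
  },
  // Echoes the input back; a real provider would call an API here.
  getCompletion: async (input) => ({
    type: "success",
    value: input.text,
    timeToComplete: 0,
    statusCode: 200,
  }),
  normalizeOutput: (output) => ({ type: "text", value: output }),
};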

View File

@@ -2,11 +2,11 @@ import { type Session } from "next-auth";
import { SessionProvider } from "next-auth/react"; import { SessionProvider } from "next-auth/react";
import { type AppType } from "next/app"; import { type AppType } from "next/app";
import { api } from "~/utils/api"; import { api } from "~/utils/api";
import { ChakraProvider } from "@chakra-ui/react";
import theme from "~/utils/theme";
import Favicon from "~/components/Favicon"; import Favicon from "~/components/Favicon";
import "~/utils/analytics"; import "~/utils/analytics";
import Head from "next/head"; import Head from "next/head";
import { ChakraThemeProvider } from "~/theme/ChakraThemeProvider";
import { SyncAppStore } from "~/state/sync";
const MyApp: AppType<{ session: Session | null }> = ({ const MyApp: AppType<{ session: Session | null }> = ({
Component, Component,
@@ -21,10 +21,11 @@ const MyApp: AppType<{ session: Session | null }> = ({
/> />
</Head> </Head>
<SessionProvider session={session}> <SessionProvider session={session}>
<SyncAppStore />
<Favicon /> <Favicon />
<ChakraProvider theme={theme}> <ChakraThemeProvider>
<Component {...pageProps} /> <Component {...pageProps} />
</ChakraProvider> </ChakraThemeProvider>
</SessionProvider> </SessionProvider>
</> </>
); );

View File

@@ -49,6 +49,10 @@ const DeleteButton = () => {
onClose(); onClose();
}, [mutation, experiment.data?.id, router]); }, [mutation, experiment.data?.id, router]);
useEffect(() => {
useAppStore.getState().sharedVariantEditor.loadMonaco().catch(console.error);
});
return ( return (
<> <>
<Button <Button

View File

@@ -20,22 +20,25 @@ export default function ExperimentsPage() {
const experiments = api.experiments.list.useQuery(); const experiments = api.experiments.list.useQuery();
const user = useSession().data; const user = useSession().data;
const authLoading = useSession().status === "loading";
if (user === null) { if (user === null || authLoading) {
return ( return (
<AppShell title="Experiments"> <AppShell title="Experiments">
<Center h="100%"> <Center h="100%">
<Text> {!authLoading && (
<Link <Text>
onClick={() => { <Link
signIn("github").catch(console.error); onClick={() => {
}} signIn("github").catch(console.error);
textDecor="underline" }}
> textDecor="underline"
Sign in >
</Link>{" "} Sign in
to view or create new experiments! </Link>{" "}
</Text> to view or create new experiments!
</Text>
)}
</Center> </Center>
</AppShell> </AppShell>
); );

View File

@@ -10,6 +10,7 @@ import {
requireNothing, requireNothing,
} from "~/utils/accessControl"; } from "~/utils/accessControl";
import userOrg from "~/server/utils/userOrg"; import userOrg from "~/server/utils/userOrg";
import generateTypes from "~/modelProviders/generateTypes";
export const experimentsRouter = createTRPCRouter({ export const experimentsRouter = createTRPCRouter({
list: protectedProcedure.query(async ({ ctx }) => { list: protectedProcedure.query(async ({ ctx }) => {
@@ -97,7 +98,7 @@ export const experimentsRouter = createTRPCRouter({
}, },
}); });
const [variant, _, scenario] = await prisma.$transaction([ const [variant, _, scenario1, scenario2, scenario3] = await prisma.$transaction([
prisma.promptVariant.create({ prisma.promptVariant.create({
data: { data: {
experimentId: exp.id, experimentId: exp.id,
@@ -108,43 +109,62 @@ export const experimentsRouter = createTRPCRouter({
constructFn: dedent` constructFn: dedent`
/** /**
* Use Javascript to define an OpenAI chat completion * Use Javascript to define an OpenAI chat completion
* (https://platform.openai.com/docs/api-reference/chat/create) and * (https://platform.openai.com/docs/api-reference/chat/create).
* assign it to the \`prompt\` variable.
* *
* You have access to the current scenario in the \`scenario\` * You have access to the current scenario in the \`scenario\`
* variable. * variable.
*/ */
prompt = { definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613", model: "gpt-3.5-turbo-0613",
stream: true, stream: true,
messages: [ messages: [
{ {
role: "system", role: "system",
content: \`"Return 'this is output for the scenario "${"$"}{scenario.text}"'\`, content: \`Write 'Start experimenting!' in ${"$"}{scenario.language}\`,
}, },
], ],
};`, });`,
model: "gpt-3.5-turbo-0613", model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 2,
}, },
}), }),
prisma.templateVariable.create({ prisma.templateVariable.create({
data: { data: {
experimentId: exp.id, experimentId: exp.id,
label: "text", label: "language",
}, },
}), }),
prisma.testScenario.create({ prisma.testScenario.create({
data: { data: {
experimentId: exp.id, experimentId: exp.id,
variableValues: { variableValues: {
text: "This is a test scenario.", language: "English",
},
},
}),
prisma.testScenario.create({
data: {
experimentId: exp.id,
variableValues: {
language: "Spanish",
},
},
}),
prisma.testScenario.create({
data: {
experimentId: exp.id,
variableValues: {
language: "German",
}, },
}, },
}), }),
]); ]);
await generateNewCell(variant.id, scenario.id); await generateNewCell(variant.id, scenario1.id);
await generateNewCell(variant.id, scenario2.id);
await generateNewCell(variant.id, scenario3.id);
return exp; return exp;
}), }),
@@ -174,4 +194,10 @@ export const experimentsRouter = createTRPCRouter({
}, },
}); });
}), }),
// Keeping these on `experiment` for now because we might want to limit the
// providers based on your account/experiment
promptTypes: publicProcedure.query(async () => {
return await generateTypes();
}),
}); });

View File

@@ -1,16 +1,15 @@
import { isObject } from "lodash-es";
import { z } from "zod"; import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc"; import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db"; import { prisma } from "~/server/db";
import { generateNewCell } from "~/server/utils/generateNewCell"; import { generateNewCell } from "~/server/utils/generateNewCell";
import { OpenAIChatModel, type SupportedModel } from "~/server/types"; import { type SupportedModel } from "~/server/types";
import { constructPrompt } from "~/server/utils/constructPrompt";
import userError from "~/server/utils/error"; import userError from "~/server/utils/error";
import { recordExperimentUpdated } from "~/server/utils/recordExperimentUpdated"; import { recordExperimentUpdated } from "~/server/utils/recordExperimentUpdated";
import { reorderPromptVariants } from "~/server/utils/reorderPromptVariants"; import { reorderPromptVariants } from "~/server/utils/reorderPromptVariants";
import { type PromptVariant } from "@prisma/client"; import { type PromptVariant } from "@prisma/client";
import { deriveNewConstructFn } from "~/server/utils/deriveNewContructFn"; import { deriveNewConstructFn } from "~/server/utils/deriveNewContructFn";
import { requireCanModifyExperiment, requireCanViewExperiment } from "~/utils/accessControl"; import { requireCanModifyExperiment, requireCanViewExperiment } from "~/utils/accessControl";
import parseConstructFn from "~/server/utils/parseConstructFn";
export const promptVariantsRouter = createTRPCRouter({ export const promptVariantsRouter = createTRPCRouter({
list: publicProcedure list: publicProcedure
@@ -198,7 +197,9 @@ export const promptVariantsRouter = createTRPCRouter({
label: newVariantLabel, label: newVariantLabel,
sortIndex: (originalVariant?.sortIndex ?? 0) + 1, sortIndex: (originalVariant?.sortIndex ?? 0) + 1,
constructFn: newConstructFn, constructFn: newConstructFn,
constructFnVersion: 2,
model: originalVariant?.model ?? "gpt-3.5-turbo", model: originalVariant?.model ?? "gpt-3.5-turbo",
modelProvider: originalVariant?.modelProvider ?? "openai/ChatCompletion",
}, },
}); });
@@ -283,11 +284,12 @@ export const promptVariantsRouter = createTRPCRouter({
return updatedPromptVariant; return updatedPromptVariant;
}), }),
getRefinedPromptFn: protectedProcedure getModifiedPromptFn: protectedProcedure
.input( .input(
z.object({ z.object({
id: z.string(), id: z.string(),
instructions: z.string(), instructions: z.string().optional(),
newModel: z.string().optional(),
}), }),
) )
.mutation(async ({ input, ctx }) => { .mutation(async ({ input, ctx }) => {
@@ -298,12 +300,15 @@ export const promptVariantsRouter = createTRPCRouter({
}); });
await requireCanModifyExperiment(existing.experimentId, ctx); await requireCanModifyExperiment(existing.experimentId, ctx);
const constructedPrompt = await constructPrompt({ constructFn: existing.constructFn }, null); const constructedPrompt = await parseConstructFn(existing.constructFn);
if ("error" in constructedPrompt) {
return userError(constructedPrompt.error);
}
const promptConstructionFn = await deriveNewConstructFn( const promptConstructionFn = await deriveNewConstructFn(
existing, existing,
// @ts-expect-error TODO clean this up input.newModel as SupportedModel | undefined,
constructedPrompt?.model as SupportedModel,
input.instructions, input.instructions,
); );
@@ -332,25 +337,10 @@ export const promptVariantsRouter = createTRPCRouter({
throw new Error(`Prompt Variant with id ${input.id} does not exist`); throw new Error(`Prompt Variant with id ${input.id} does not exist`);
} }
let model = existing.model; const parsedPrompt = await parseConstructFn(input.constructFn);
try {
const contructedPrompt = await constructPrompt({ constructFn: input.constructFn }, null);
if (!isObject(contructedPrompt)) { if ("error" in parsedPrompt) {
return userError("Prompt is not an object"); return userError(parsedPrompt.error);
}
if (!("model" in contructedPrompt)) {
return userError("Prompt does not define a model");
}
if (
typeof contructedPrompt.model !== "string" ||
!(contructedPrompt.model in OpenAIChatModel)
) {
return userError("Prompt defines an invalid model");
}
model = contructedPrompt.model;
} catch (e) {
return userError((e as Error).message);
} }
// Create a duplicate with only the config changed // Create a duplicate with only the config changed
@@ -361,7 +351,9 @@ export const promptVariantsRouter = createTRPCRouter({
sortIndex: existing.sortIndex, sortIndex: existing.sortIndex,
uiId: existing.uiId, uiId: existing.uiId,
constructFn: input.constructFn, constructFn: input.constructFn,
model, constructFnVersion: 2,
modelProvider: parsedPrompt.modelProvider,
model: parsedPrompt.model,
}, },
}); });

View File

@@ -0,0 +1,45 @@
import "dotenv/config";
import dedent from "dedent";
import { expect, test } from "vitest";
import { migrate1to2 } from "./migrateConstructFns";
test("migrate1to2", () => {
const constructFn = dedent`
// Test comment
prompt = {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: "What is the capital of China?"
}
]
}
`;
const migrated = migrate1to2(constructFn);
expect(migrated).toBe(dedent`
// Test comment
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: "What is the capital of China?"
}
]
})
`);
// console.log(
// migrateConstructFn(dedent`definePrompt(
// "openai/ChatCompletion",
// {
// model: 'gpt-3.5-turbo-0613',
// messages: []
// }
// )`),
// );
});

View File

@@ -0,0 +1,58 @@
import * as recast from "recast";
import { type ASTNode } from "ast-types";
import { prisma } from "../db";
import { fileURLToPath } from "url";
const { builders: b } = recast.types;
export const migrate1to2 = (fnBody: string): string => {
const ast: ASTNode = recast.parse(fnBody);
recast.visit(ast, {
visitAssignmentExpression(path) {
const node = path.node;
if ("name" in node.left && node.left.name === "prompt") {
const functionCall = b.callExpression(b.identifier("definePrompt"), [
b.literal("openai/ChatCompletion"),
node.right,
]);
path.replace(functionCall);
}
return false;
},
});
return recast.print(ast).code;
};
export default async function migrateConstructFns() {
const v1Prompts = await prisma.promptVariant.findMany({
where: {
constructFnVersion: 1,
},
});
console.log(`Migrating ${v1Prompts.length} prompts 1->2`);
await Promise.all(
v1Prompts.map(async (variant) => {
try {
await prisma.promptVariant.update({
where: {
id: variant.id,
},
data: {
constructFn: migrate1to2(variant.constructFn),
constructFnVersion: 2,
},
});
} catch (e) {
console.error("Error migrating constructFn for variant", variant.id, e);
}
}),
);
}
// If we're running this file directly, run the migration
if (process.argv.at(-1) === fileURLToPath(import.meta.url)) {
console.log("Running migration");
await migrateConstructFns();
console.log("Done");
}

View File

@@ -0,0 +1,19 @@
import "dotenv/config";
import { openai } from "../utils/openai";
const resp = await openai.chat.completions.create({
model: "gpt-3.5-turbo-0613",
stream: true,
messages: [
{
role: "user",
content: "count to 20",
},
],
});
for await (const part of resp) {
console.log("part", part);
}
console.log("final resp", resp);

View File

@@ -1,26 +1,26 @@
// /* eslint-disable */ /* eslint-disable */
// import "dotenv/config"; import "dotenv/config";
// import Replicate from "replicate"; import Replicate from "replicate";
// const replicate = new Replicate({ const replicate = new Replicate({
// auth: process.env.REPLICATE_API_TOKEN || "", auth: process.env.REPLICATE_API_TOKEN || "",
// }); });
// console.log("going to run"); console.log("going to run");
// const prediction = await replicate.predictions.create({ const prediction = await replicate.predictions.create({
// version: "e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48", version: "3725a659b5afff1a0ba9bead5fac3899d998feaad00e07032ca2b0e35eb14f8a",
// input: { input: {
// prompt: "...", prompt: "...",
// }, },
// }); });
// console.log("waiting"); console.log("waiting");
// setInterval(() => { setInterval(() => {
// replicate.predictions.get(prediction.id).then((prediction) => { replicate.predictions.get(prediction.id).then((prediction) => {
// console.log(prediction.output); console.log(prediction);
// }); });
// }, 500); }, 500);
// // const output = await replicate.wait(prediction, {}); // const output = await replicate.wait(prediction, {});
// // console.log(output); // console.log(output);

View File

@@ -1,15 +1,18 @@
import crypto from "crypto";
import { prisma } from "~/server/db"; import { prisma } from "~/server/db";
import defineTask from "./defineTask"; import defineTask from "./defineTask";
import { type CompletionResponse, getOpenAIChatCompletion } from "../utils/getCompletion";
import { type JSONSerializable } from "../types";
import { sleep } from "../utils/sleep"; import { sleep } from "../utils/sleep";
import { shouldStream } from "../utils/shouldStream";
import { generateChannel } from "~/utils/generateChannel"; import { generateChannel } from "~/utils/generateChannel";
import { runEvalsForOutput } from "../utils/evaluations"; import { runEvalsForOutput } from "../utils/evaluations";
import { constructPrompt } from "../utils/constructPrompt";
import { type CompletionCreateParams } from "openai/resources/chat";
import { type Prisma } from "@prisma/client"; import { type Prisma } from "@prisma/client";
import parseConstructFn from "../utils/parseConstructFn";
import hashPrompt from "../utils/hashPrompt";
import { type JsonObject } from "type-fest";
import modelProviders from "~/modelProviders/modelProviders";
import { wsConnection } from "~/utils/wsConnection";
export type queryLLMJob = {
scenarioVariantCellId: string;
};
const MAX_AUTO_RETRIES = 10; const MAX_AUTO_RETRIES = 10;
const MIN_DELAY = 500; // milliseconds const MIN_DELAY = 500; // milliseconds
@@ -21,48 +24,6 @@ function calculateDelay(numPreviousTries: number): number {
return baseDelay + jitter; return baseDelay + jitter;
} }
const getCompletionWithRetries = async (
cellId: string,
payload: JSONSerializable,
channel?: string,
): Promise<CompletionResponse> => {
let modelResponse: CompletionResponse | null = null;
try {
for (let i = 0; i < MAX_AUTO_RETRIES; i++) {
modelResponse = await getOpenAIChatCompletion(
payload as unknown as CompletionCreateParams,
channel,
);
if (modelResponse.statusCode !== 429 || i === MAX_AUTO_RETRIES - 1) {
return modelResponse;
}
const delay = calculateDelay(i);
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
errorMessage: "Rate limit exceeded",
statusCode: 429,
retryTime: new Date(Date.now() + delay),
},
});
// TODO: Maybe requeue the job so other jobs can run in the future?
await sleep(delay);
}
throw new Error("Max retries limit reached");
} catch (error: unknown) {
return {
statusCode: modelResponse?.statusCode ?? 500,
errorMessage: modelResponse?.errorMessage ?? (error as Error).message,
output: null,
timeToComplete: 0,
};
}
};
export type queryLLMJob = {
scenarioVariantCellId: string;
};
export const queryLLM = defineTask<queryLLMJob>("queryLLM", async (task) => { export const queryLLM = defineTask<queryLLMJob>("queryLLM", async (task) => {
const { scenarioVariantCellId } = task; const { scenarioVariantCellId } = task;
const cell = await prisma.scenarioVariantCell.findUnique({ const cell = await prisma.scenarioVariantCell.findUnique({
@@ -122,61 +83,83 @@ export const queryLLM = defineTask<queryLLMJob>("queryLLM", async (task) => {
return; return;
} }
const prompt = await constructPrompt(variant, scenario.variableValues); const prompt = await parseConstructFn(variant.constructFn, scenario.variableValues as JsonObject);
const streamingEnabled = shouldStream(prompt); if ("error" in prompt) {
let streamingChannel;
if (streamingEnabled) {
streamingChannel = generateChannel();
// Save streaming channel so that UI can connect to it
await prisma.scenarioVariantCell.update({ await prisma.scenarioVariantCell.update({
where: { id: scenarioVariantCellId }, where: { id: scenarioVariantCellId },
data: { data: {
streamingChannel, statusCode: 400,
errorMessage: prompt.error,
retrievalStatus: "ERROR",
}, },
}); });
return;
} }
const modelResponse = await getCompletionWithRetries( const provider = modelProviders[prompt.modelProvider];
scenarioVariantCellId,
prompt,
streamingChannel,
);
let modelOutput = null; const streamingChannel = provider.shouldStream(prompt.modelInput) ? generateChannel() : null;
if (modelResponse.statusCode === 200) {
const inputHash = crypto.createHash("sha256").update(JSON.stringify(prompt)).digest("hex");
modelOutput = await prisma.modelOutput.create({ if (streamingChannel) {
data: { // Save streaming channel so that UI can connect to it
scenarioVariantCellId, await prisma.scenarioVariantCell.update({
inputHash, where: { id: scenarioVariantCellId },
output: modelResponse.output as unknown as Prisma.InputJsonObject, data: { streamingChannel },
timeToComplete: modelResponse.timeToComplete,
promptTokens: modelResponse.promptTokens,
completionTokens: modelResponse.completionTokens,
cost: modelResponse.cost,
},
}); });
} }
const onStream = streamingChannel
? (partialOutput: (typeof provider)["_outputSchema"]) => {
wsConnection.emit("message", { channel: streamingChannel, payload: partialOutput });
}
: null;
await prisma.scenarioVariantCell.update({ for (let i = 0; true; i++) {
where: { id: scenarioVariantCellId }, const response = await provider.getCompletion(prompt.modelInput, onStream);
data: { if (response.type === "success") {
statusCode: modelResponse.statusCode, const inputHash = hashPrompt(prompt);
errorMessage: modelResponse.errorMessage,
streamingChannel: null, const modelOutput = await prisma.modelOutput.create({
retrievalStatus: modelOutput ? "COMPLETE" : "ERROR", data: {
modelOutput: { scenarioVariantCellId,
connect: { inputHash,
id: modelOutput?.id, output: response.value as Prisma.InputJsonObject,
timeToComplete: response.timeToComplete,
promptTokens: response.promptTokens,
completionTokens: response.completionTokens,
cost: response.cost,
}, },
}, });
},
});
if (modelOutput) { await prisma.scenarioVariantCell.update({
await runEvalsForOutput(variant.experimentId, scenario, modelOutput); where: { id: scenarioVariantCellId },
data: {
statusCode: response.statusCode,
retrievalStatus: "COMPLETE",
},
});
await runEvalsForOutput(variant.experimentId, scenario, modelOutput);
break;
} else {
const shouldRetry = response.autoRetry && i < MAX_AUTO_RETRIES;
const delay = calculateDelay(i);
await prisma.scenarioVariantCell.update({
where: { id: scenarioVariantCellId },
data: {
errorMessage: response.message,
statusCode: response.statusCode,
retryTime: shouldRetry ? new Date(Date.now() + delay) : null,
retrievalStatus: "ERROR",
},
});
if (shouldRetry) {
await sleep(delay);
} else {
break;
}
}
} }
}); });
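The retry loop above leans on calculateDelay, whose tail is visible earlier in this diff; the usual exponential-backoff-with-jitter shape is sketched below (MIN_DELAY is defined in this file, while the doubling base is an assumption, not the diff's exact math):

function calculateDelaySketch(numPreviousTries: number): number {
  const baseDelay = MIN_DELAY * Math.pow(2, numPreviousTries);
  const jitter = Math.random() * baseDelay; // spread retries to avoid synchronized bursts
  return baseDelay + jitter;
}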

View File

@@ -1,14 +1,3 @@
export type JSONSerializable =
| string
| number
| boolean
| null
| JSONSerializable[]
| { [key: string]: JSONSerializable };
// Placeholder for now
export type OpenAIChatConfig = NonNullable<JSONSerializable>;
export enum OpenAIChatModel { export enum OpenAIChatModel {
"gpt-4" = "gpt-4", "gpt-4" = "gpt-4",
"gpt-4-0613" = "gpt-4-0613", "gpt-4-0613" = "gpt-4-0613",

View File

@@ -1,15 +0,0 @@
import { test } from "vitest";
import { constructPrompt } from "./constructPrompt";
test.skip("constructPrompt", async () => {
const constructed = await constructPrompt(
{
constructFn: `prompt = { "fooz": "bar" }`,
},
{
foo: "bar",
},
);
console.log(constructed);
});

View File

@@ -1,35 +0,0 @@
import { type PromptVariant, type TestScenario } from "@prisma/client";
import ivm from "isolated-vm";
import { type JSONSerializable } from "../types";
const isolate = new ivm.Isolate({ memoryLimit: 128 });
export async function constructPrompt(
variant: Pick<PromptVariant, "constructFn">,
scenario: TestScenario["variableValues"],
): Promise<JSONSerializable> {
const code = `
const scenario = ${JSON.stringify(scenario ?? {}, null, 2)};
let prompt
${variant.constructFn}
global.prompt = prompt;
`;
console.log("code is", code);
const context = await isolate.createContext();
const jail = context.global;
await jail.set("global", jail.derefInto());
const script = await isolate.compileScript(code);
await script.run(context);
const promptReference = (await context.global.get("prompt")) as ivm.Reference;
const prompt = await promptReference.copy(); // Get the actual value from the isolate
return prompt as JSONSerializable;
}

View File

@@ -6,6 +6,7 @@ import { openai } from "./openai";
import { getApiShapeForModel } from "./getTypesForModel"; import { getApiShapeForModel } from "./getTypesForModel";
import { isObject } from "lodash-es"; import { isObject } from "lodash-es";
import { type CompletionCreateParams } from "openai/resources/chat/completions"; import { type CompletionCreateParams } from "openai/resources/chat/completions";
import formatPromptConstructor from "~/utils/formatPromptConstructor";
const isolate = new ivm.Isolate({ memoryLimit: 128 }); const isolate = new ivm.Isolate({ memoryLimit: 128 });
@@ -49,26 +50,25 @@ const requestUpdatedPromptFunction = async (
getApiShapeForModel(originalModel), getApiShapeForModel(originalModel),
null, null,
2, 2,
)}`, )}\n\nDo not add any assistant messages.`,
},
{
role: "user",
content: `This is the current prompt constructor function:\n---\n${originalVariant.constructFn}`,
}, },
]; ];
if (newModel) { if (newModel) {
messages.push({ messages.push({
role: "user", role: "user",
content: `Return the prompt constructor function for ${newModel} given the following prompt constructor function for ${originalModel}:\n---\n${originalVariant.constructFn}`, content: `Return the prompt constructor function for ${newModel} given the existing prompt constructor function for ${originalModel}`,
}); });
} }
if (instructions) { if (instructions) {
messages.push({ messages.push({
role: "user", role: "user",
content: `Follow these instructions: ${instructions}`, content: instructions,
}); });
} }
messages.push({
role: "user",
content:
"The prompt variable has already been declared, so do not declare it again. Rewrite the entire prompt constructor function.",
});
const completion = await openai.chat.completions.create({ const completion = await openai.chat.completions.create({
model: "gpt-4", model: "gpt-4",
messages, messages,
@@ -111,7 +111,7 @@ const requestUpdatedPromptFunction = async (
const args = await contructPromptFunctionArgs.copy(); // Get the actual value from the isolate const args = await contructPromptFunctionArgs.copy(); // Get the actual value from the isolate
if (args && isObject(args) && "new_prompt_function" in args) { if (args && isObject(args) && "new_prompt_function" in args) {
newContructionFn = args.new_prompt_function as string; newContructionFn = await formatPromptConstructor(args.new_prompt_function as string);
break; break;
} }
} catch (e) { } catch (e) {

View File

@@ -1,5 +1,3 @@
import { type JSONSerializable } from "../types";
export type VariableMap = Record<string, string>; export type VariableMap = Record<string, string>;
// Escape quotes to match the way we encode JSON // Escape quotes to match the way we encode JSON
@@ -15,24 +13,3 @@ export function escapeRegExp(str: string) {
export function fillTemplate(template: string, variables: VariableMap): string { export function fillTemplate(template: string, variables: VariableMap): string {
return template.replace(/{{\s*(\w+)\s*}}/g, (_, key: string) => variables[key] || ""); return template.replace(/{{\s*(\w+)\s*}}/g, (_, key: string) => variables[key] || "");
} }
export function fillTemplateJson<T extends JSONSerializable>(
template: T,
variables: VariableMap,
): T {
if (typeof template === "string") {
return fillTemplate(template, variables) as T;
} else if (Array.isArray(template)) {
return template.map((item) => fillTemplateJson(item, variables)) as T;
} else if (typeof template === "object" && template !== null) {
return Object.keys(template).reduce(
(acc, key) => {
acc[key] = fillTemplateJson(template[key] as JSONSerializable, variables);
return acc;
},
{} as { [key: string]: JSONSerializable } & T,
);
} else {
return template;
}
}

View File

@@ -1,8 +1,9 @@
import crypto from "crypto";
import { type Prisma } from "@prisma/client"; import { type Prisma } from "@prisma/client";
import { prisma } from "../db"; import { prisma } from "../db";
import { queueLLMRetrievalTask } from "./queueLLMRetrievalTask"; import { queueLLMRetrievalTask } from "./queueLLMRetrievalTask";
import { constructPrompt } from "./constructPrompt"; import parseConstructFn from "./parseConstructFn";
import { type JsonObject } from "type-fest";
import hashPrompt from "./hashPrompt";
export const generateNewCell = async (variantId: string, scenarioId: string) => { export const generateNewCell = async (variantId: string, scenarioId: string) => {
const variant = await prisma.promptVariant.findUnique({ const variant = await prisma.promptVariant.findUnique({
@@ -19,10 +20,6 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
if (!variant || !scenario) return null; if (!variant || !scenario) return null;
const prompt = await constructPrompt(variant, scenario.variableValues);
const inputHash = crypto.createHash("sha256").update(JSON.stringify(prompt)).digest("hex");
let cell = await prisma.scenarioVariantCell.findUnique({ let cell = await prisma.scenarioVariantCell.findUnique({
where: { where: {
promptVariantId_testScenarioId: { promptVariantId_testScenarioId: {
@@ -37,10 +34,31 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
if (cell) return cell; if (cell) return cell;
const parsedConstructFn = await parseConstructFn(
variant.constructFn,
scenario.variableValues as JsonObject,
);
if ("error" in parsedConstructFn) {
return await prisma.scenarioVariantCell.create({
data: {
promptVariantId: variantId,
testScenarioId: scenarioId,
statusCode: 400,
errorMessage: parsedConstructFn.error,
retrievalStatus: "ERROR",
},
});
}
const inputHash = hashPrompt(parsedConstructFn);
cell = await prisma.scenarioVariantCell.create({ cell = await prisma.scenarioVariantCell.create({
data: { data: {
promptVariantId: variantId, promptVariantId: variantId,
testScenarioId: scenarioId, testScenarioId: scenarioId,
prompt: parsedConstructFn.modelInput as unknown as Prisma.InputJsonValue,
retrievalStatus: "PENDING",
}, },
include: { include: {
modelOutput: true, modelOutput: true,
@@ -48,9 +66,7 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
}); });
const matchingModelOutput = await prisma.modelOutput.findFirst({ const matchingModelOutput = await prisma.modelOutput.findFirst({
where: { where: { inputHash },
inputHash,
},
}); });
let newModelOutput; let newModelOutput;
@@ -69,6 +85,10 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
updatedAt: matchingModelOutput.updatedAt, updatedAt: matchingModelOutput.updatedAt,
}, },
}); });
await prisma.scenarioVariantCell.update({
where: { id: cell.id },
data: { retrievalStatus: "COMPLETE" },
});
} else { } else {
cell = await queueLLMRetrievalTask(cell.id); cell = await queueLLMRetrievalTask(cell.id);
} }

View File

@@ -1,107 +0,0 @@
/* eslint-disable @typescript-eslint/no-unsafe-call */
import { isObject } from "lodash-es";
import { streamChatCompletion } from "./openai";
import { wsConnection } from "~/utils/wsConnection";
import { type ChatCompletion, type CompletionCreateParams } from "openai/resources/chat";
import { type SupportedModel, type OpenAIChatModel } from "../types";
import { env } from "~/env.mjs";
import { countOpenAIChatTokens } from "~/utils/countTokens";
import { rateLimitErrorMessage } from "~/sharedStrings";
import { modelStats } from "../modelStats";
export type CompletionResponse = {
output: ChatCompletion | null;
statusCode: number;
errorMessage: string | null;
timeToComplete: number;
promptTokens?: number;
completionTokens?: number;
cost?: number;
};
export async function getOpenAIChatCompletion(
payload: CompletionCreateParams,
channel?: string,
): Promise<CompletionResponse> {
// If functions are enabled, disable streaming so that we get the full response with token counts
if (payload.functions?.length) payload.stream = false;
const start = Date.now();
const response = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${env.OPENAI_API_KEY}`,
},
body: JSON.stringify(payload),
});
const resp: CompletionResponse = {
output: null,
errorMessage: null,
statusCode: response.status,
timeToComplete: 0,
};
try {
if (payload.stream) {
const completion = streamChatCompletion(payload as unknown as CompletionCreateParams);
let finalOutput: ChatCompletion | null = null;
await (async () => {
for await (const partialCompletion of completion) {
finalOutput = partialCompletion;
wsConnection.emit("message", { channel, payload: partialCompletion });
}
})().catch((err) => console.error(err));
if (finalOutput) {
resp.output = finalOutput;
resp.timeToComplete = Date.now() - start;
}
} else {
resp.timeToComplete = Date.now() - start;
resp.output = await response.json();
}
if (!response.ok) {
if (response.status === 429) {
resp.errorMessage = rateLimitErrorMessage;
} else if (
isObject(resp.output) &&
"error" in resp.output &&
isObject(resp.output.error) &&
"message" in resp.output.error
) {
// If it's an object, try to get the error message
resp.errorMessage = resp.output.error.message?.toString() ?? "Unknown error";
}
}
if (isObject(resp.output) && "usage" in resp.output) {
const usage = resp.output.usage as unknown as ChatCompletion.Usage;
resp.promptTokens = usage.prompt_tokens;
resp.completionTokens = usage.completion_tokens;
} else if (isObject(resp.output) && "choices" in resp.output) {
const model = payload.model as unknown as OpenAIChatModel;
resp.promptTokens = countOpenAIChatTokens(model, payload.messages);
const choices = resp.output.choices as unknown as ChatCompletion.Choice[];
const message = choices[0]?.message;
if (message) {
const messages = [message];
resp.completionTokens = countOpenAIChatTokens(model, messages);
}
}
const stats = modelStats[resp.output?.model as SupportedModel];
if (stats && resp.promptTokens && resp.completionTokens) {
resp.cost =
resp.promptTokens * stats.promptTokenPrice +
resp.completionTokens * stats.completionTokenPrice;
}
} catch (e) {
console.error(e);
if (response.ok) {
resp.errorMessage = "Failed to parse response";
}
}
return resp;
}

View File

@@ -1,7 +1,6 @@
import { OpenAIChatModel, type SupportedModel } from "../types"; import { type SupportedModel } from "../types";
import openAIChatApiShape from "~/codegen/openai.types.ts.txt";
export const getApiShapeForModel = (model: SupportedModel) => { export const getApiShapeForModel = (model: SupportedModel) => {
if (model in OpenAIChatModel) return openAIChatApiShape; // if (model in OpenAIChatModel) return openAIChatApiShape;
return ""; return "";
}; };

View File

@@ -0,0 +1,37 @@
import crypto from "crypto";
import { type JsonValue } from "type-fest";
import { type ParsedConstructFn } from "./parseConstructFn";
function sortKeys(obj: JsonValue): JsonValue {
if (typeof obj !== "object" || obj === null) {
// Not an object or array, return as is
return obj;
}
if (Array.isArray(obj)) {
return obj.map(sortKeys);
}
// Get keys and sort them
const keys = Object.keys(obj).sort();
const sortedObj = {};
for (const key of keys) {
// @ts-expect-error not worth fixing types
// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
sortedObj[key] = sortKeys(obj[key]);
}
return sortedObj;
}
export default function hashPrompt(prompt: ParsedConstructFn<any>): string {
// Sort object keys recursively
const sortedObj = sortKeys(prompt as unknown as JsonValue);
// Convert to JSON and hash it
const str = JSON.stringify(sortedObj);
const hash = crypto.createHash("sha256");
hash.update(str);
return hash.digest("hex");
}
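Because keys are sorted recursively before hashing, key order no longer affects the digest (a sketch; the casts just sidestep the generic parameter):

const a = hashPrompt({ modelProvider: "openai/ChatCompletion", model: "gpt-4-0613", modelInput: { x: 1, y: 2 } } as any);
const b = hashPrompt({ modelInput: { y: 2, x: 1 }, model: "gpt-4-0613", modelProvider: "openai/ChatCompletion" } as any);
// a === b, so equivalent prompts can share cached model outputs in generateNewCell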

View File

@@ -1,64 +1,5 @@
import { omit } from "lodash-es";
import { env } from "~/env.mjs"; import { env } from "~/env.mjs";
import OpenAI from "openai"; import OpenAI from "openai";
import {
type ChatCompletion,
type ChatCompletionChunk,
type CompletionCreateParams,
} from "openai/resources/chat";
export const openai = new OpenAI({ apiKey: env.OPENAI_API_KEY }); export const openai = new OpenAI({ apiKey: env.OPENAI_API_KEY });
export const mergeStreamedChunks = (
base: ChatCompletion | null,
chunk: ChatCompletionChunk,
): ChatCompletion => {
if (base === null) {
return mergeStreamedChunks({ ...chunk, choices: [] }, chunk);
}
const choices = [...base.choices];
for (const choice of chunk.choices) {
const baseChoice = choices.find((c) => c.index === choice.index);
if (baseChoice) {
baseChoice.finish_reason = choice.finish_reason ?? baseChoice.finish_reason;
baseChoice.message = baseChoice.message ?? { role: "assistant" };
if (choice.delta?.content)
baseChoice.message.content =
((baseChoice.message.content as string) ?? "") + (choice.delta.content ?? "");
if (choice.delta?.function_call) {
const fnCall = baseChoice.message.function_call ?? {};
fnCall.name =
((fnCall.name as string) ?? "") + ((choice.delta.function_call.name as string) ?? "");
fnCall.arguments =
((fnCall.arguments as string) ?? "") +
((choice.delta.function_call.arguments as string) ?? "");
}
} else {
choices.push({ ...omit(choice, "delta"), message: { role: "assistant", ...choice.delta } });
}
}
const merged: ChatCompletion = {
...base,
choices,
};
return merged;
};
export const streamChatCompletion = async function* (body: CompletionCreateParams) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-call
const resp = await openai.chat.completions.create({
...body,
stream: true,
});
let mergedChunks: ChatCompletion | null = null;
for await (const part of resp) {
mergedChunks = mergeStreamedChunks(mergedChunks, part);
yield mergedChunks;
}
};

View File

@@ -0,0 +1,45 @@
import { expect, test } from "vitest";
import parseConstructFn from "./parseConstructFn";
import assert from "assert";
// Note: this has to be run with `vitest --no-threads` option or else
// isolated-vm seems to throw errors
test("parseConstructFn", async () => {
const constructed = await parseConstructFn(
`
// These sometimes have a comment
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: \`What is the capital of \${scenario.country}?\`
}
]
})
`,
{ country: "Bolivia" },
);
expect(constructed).toEqual({
modelProvider: "openai/ChatCompletion",
model: "gpt-3.5-turbo-0613",
modelInput: {
messages: [
{
content: "What is the capital of Bolivia?",
role: "user",
},
],
model: "gpt-3.5-turbo-0613",
},
});
});
test("bad syntax", async () => {
const parsed = await parseConstructFn(`definePrompt("openai/ChatCompletion", {`);
assert("error" in parsed);
expect(parsed.error).toContain("Unexpected end of input");
});

View File

@@ -0,0 +1,92 @@
+import modelProviders from "~/modelProviders/modelProviders";
+import ivm from "isolated-vm";
+import { isObject, isString } from "lodash-es";
+import { type JsonObject } from "type-fest";
+import { validate } from "jsonschema";
+
+export type ParsedConstructFn<T extends keyof typeof modelProviders> = {
+  modelProvider: T;
+  model: keyof (typeof modelProviders)[T]["models"];
+  modelInput: Parameters<(typeof modelProviders)[T]["getModel"]>[0];
+};
+
+const isolate = new ivm.Isolate({ memoryLimit: 128 });
+
+export default async function parseConstructFn(
+  constructFn: string,
+  scenario: JsonObject | undefined = {},
+): Promise<ParsedConstructFn<keyof typeof modelProviders> | { error: string }> {
+  try {
+    const modifiedConstructFn = constructFn.replace(
+      "definePrompt(",
+      "global.prompt = definePrompt(",
+    );
+
+    const code = `
+      const scenario = ${JSON.stringify(scenario ?? {}, null, 2)};
+
+      const definePrompt = (modelProvider, input) => ({
+        modelProvider,
+        input
+      })
+
+      ${modifiedConstructFn}
+    `;
+
+    const context = await isolate.createContext();
+
+    const jail = context.global;
+    await jail.set("global", jail.derefInto());
+
+    const script = await isolate.compileScript(code);
+    await script.run(context);
+    const promptReference = (await context.global.get("prompt")) as ivm.Reference;
+
+    const prompt = await promptReference.copy();
+
+    if (!isObject(prompt)) {
+      return { error: "definePrompt did not return an object" };
+    }
+    if (!("modelProvider" in prompt) || !isString(prompt.modelProvider)) {
+      return { error: "definePrompt did not return a valid modelProvider" };
+    }
+    const provider =
+      prompt.modelProvider in modelProviders &&
+      modelProviders[prompt.modelProvider as keyof typeof modelProviders];
+    if (!provider) {
+      return { error: "definePrompt did not return a known modelProvider" };
+    }
+
+    if (!("input" in prompt) || !isObject(prompt.input)) {
+      return { error: "definePrompt did not return an input" };
+    }
+
+    const validationResult = validate(prompt.input, provider.inputSchema);
+    if (!validationResult.valid)
+      return {
+        error: `definePrompt did not return a valid input: ${validationResult.errors
+          .map((e) => e.stack)
+          .join(", ")}`,
+      };
+
+    // We've validated the JSON schema so this should be safe
+    const input = prompt.input as Parameters<(typeof provider)["getModel"]>[0];
+
+    const model = provider.getModel(input);
+    if (!model) {
+      return {
+        error: `definePrompt did not return a known model for the provider ${prompt.modelProvider}`,
+      };
+    }
+
+    return {
+      modelProvider: prompt.modelProvider as keyof typeof modelProviders,
+      model,
+      modelInput: input,
+    };
+  } catch (e) {
+    const msg =
+      isObject(e) && "message" in e && isString(e.message)
+        ? e.message
+        : "unknown error parsing definePrompt script";
+    return { error: msg };
+  }
+}
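Note on consumption: parseConstructFn returns a union with { error: string } rather than throwing, so callers are expected to narrow on the "error" key before touching the typed fields. A minimal sketch of a call site (hypothetical — the variant/scenario property names here are placeholders, not part of this diff):

    const parsed = await parseConstructFn(variant.constructFn, scenario.variableValues);
    if ("error" in parsed) {
      // sandbox, syntax, or schema-validation failure
      return { status: "error", message: parsed.error };
    }
    // narrowed: modelProvider, model, and modelInput are now fully typed
    const { modelProvider, model, modelInput } = parsed;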


@@ -1,7 +0,0 @@
-import { isObject } from "lodash-es";
-import { type JSONSerializable } from "../types";
-
-export const shouldStream = (config: JSONSerializable): boolean => {
-  const shouldStream = isObject(config) && "stream" in config && config.stream === true;
-  return shouldStream;
-};


@@ -1,7 +1,6 @@
 import { type RouterOutputs } from "~/utils/api";
 import { type SliceCreator } from "./store";
 import loader from "@monaco-editor/loader";
-import openAITypes from "~/codegen/openai.types.ts.txt";
 import formatPromptConstructor from "~/utils/formatPromptConstructor";

 export const editorBackground = "#fafafa";

@@ -20,7 +19,10 @@ export const createVariantEditorSlice: SliceCreator<SharedVariantEditorSlice> =
     // We only want to run this client-side
     if (typeof window === "undefined") return;

-    const monaco = await loader.init();
+    const [monaco, promptTypes] = await Promise.all([
+      loader.init(),
+      get().api?.client.experiments.promptTypes.query(),
+    ]);

     monaco.editor.defineTheme("customTheme", {
       base: "vs",

@@ -37,14 +39,9 @@ export const createVariantEditorSlice: SliceCreator<SharedVariantEditorSlice> =
       lib: ["esnext"],
     });

-    monaco.editor.createModel(
-      `
-      ${openAITypes}
-
-      declare var prompt: components["schemas"]["CreateChatCompletionRequest"];
-      `,
-      "typescript",
-      monaco.Uri.parse("file:///openai.types.ts"),
+    monaco.languages.typescript.typescriptDefaults.addExtraLib(
+      promptTypes ?? "",
+      "file:///PromptTypes.d.ts",
     );

     monaco.languages.registerDocumentFormattingEditProvider("typescript", {

@@ -64,7 +61,6 @@ export const createVariantEditorSlice: SliceCreator<SharedVariantEditorSlice> =
       get().sharedVariantEditor.updateScenariosModel();
     },
     scenarios: [],
-    // scenariosModel: null,
     setScenarios: (scenarios) => {
       set((state) => {
         state.sharedVariantEditor.scenarios = scenarios;


@@ -5,11 +5,14 @@ import {
   type SharedVariantEditorSlice,
   createVariantEditorSlice,
 } from "./sharedVariantEditor.slice";
+import { type APIClient } from "~/utils/api";

 export type State = {
   drawerOpen: boolean;
   openDrawer: () => void;
   closeDrawer: () => void;
+  api: APIClient | null;
+  setApi: (api: APIClient) => void;
   sharedVariantEditor: SharedVariantEditorSlice;
 };

@@ -20,6 +23,12 @@ export type GetFn = Parameters<SliceCreator<unknown>>[1];

 const useBaseStore = create<State, [["zustand/immer", never]]>(
   immer((set, get, ...rest) => ({
+    api: null,
+    setApi: (api) =>
+      set((state) => {
+        state.api = api;
+      }),
     drawerOpen: false,
     openDrawer: () =>
       set((state) => {

@@ -34,5 +43,3 @@ const useBaseStore = create<State, [["zustand/immer", never]]>(
 );

 export const useAppStore = createSelectors(useBaseStore);
-
-useAppStore.getState().sharedVariantEditor.loadMonaco().catch(console.error);
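Worth noting: the module-load-time loadMonaco() call above is removed because loadMonaco now reads get().api, which is only populated once the client registers its tRPC context (see SyncAppStore below). Presumably the call moves somewhere that runs after that registration; a hypothetical sketch, not shown in this diff:

    useEffect(() => {
      useAppStore.getState().sharedVariantEditor.loadMonaco().catch(console.error);
    }, []);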


@@ -15,3 +15,15 @@ export function useSyncVariantEditor() {
     }
   }, [scenarios.data]);
 }
+
+export function SyncAppStore() {
+  const utils = api.useContext();
+
+  const setApi = useAppStore((state) => state.setApi);
+
+  useEffect(() => {
+    setApi(utils);
+  }, [utils, setApi]);
+
+  return null;
+}
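SyncAppStore renders nothing; its only job is to copy the tRPC utils into the zustand store so non-component code (like loadMonaco) can issue queries. It presumably gets mounted once near the app root, e.g. (hypothetical placement, not part of this diff):

    <SyncAppStore />
    <Component {...pageProps} />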


@@ -1,5 +1,6 @@
 import { extendTheme } from "@chakra-ui/react";
 import "@fontsource/inconsolata";
+import { ChakraProvider } from "@chakra-ui/react";

 const systemFont =
   'ui-sans-serif, -apple-system, "system-ui", "Segoe UI", Helvetica, "Apple Color Emoji", Arial, sans-serif, "Segoe UI Emoji", "Segoe UI Symbol"';

@@ -34,4 +35,6 @@ const theme = extendTheme({
   },
 });

-export default theme;
+export const ChakraThemeProvider = ({ children }: { children: JSX.Element }) => {
+  return <ChakraProvider theme={theme}>{children}</ChakraProvider>;
+};
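Since the theme is no longer a default export, consumers would switch from passing theme into ChakraProvider themselves to wrapping with the new component, e.g. (hypothetical usage):

    <ChakraThemeProvider>
      <Component {...pageProps} />
    </ChakraThemeProvider>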


@@ -65,3 +65,5 @@ export type RouterInputs = inferRouterInputs<AppRouter>;
  * @example type HelloOutput = RouterOutputs['example']['hello']
  */
 export type RouterOutputs = inferRouterOutputs<AppRouter>;
+
+export type APIClient = ReturnType<typeof api.useContext>;


@@ -9,7 +9,7 @@ interface GPTTokensMessageItem {
 }

 export const countOpenAIChatTokens = (
-  model: OpenAIChatModel,
+  model: keyof typeof OpenAIChatModel,
   messages: ChatCompletion.Choice.Message[],
 ) => {
   return new GPTTokens({ model, messages: messages as unknown as GPTTokensMessageItem[] })


@@ -16,7 +16,7 @@ export function stripTypes(tsCode: string): string {
     const result = babel.transform(tsCode, options);
     return result.code ?? tsCode;
   } catch (error) {
-    console.error("Error stripping types", error);
+    // console.error("Error stripping types", error);
     return tsCode;
   }
 }


@@ -1,13 +1,12 @@
-import { type ChatCompletion } from "openai/resources/chat";
 import { useRef, useState, useEffect } from "react";
 import { io, type Socket } from "socket.io-client";
 import { env } from "~/env.mjs";

 const url = env.NEXT_PUBLIC_SOCKET_URL;

-export default function useSocket(channel?: string | null) {
+export default function useSocket<T>(channel?: string | null) {
   const socketRef = useRef<Socket>();
-  const [message, setMessage] = useState<ChatCompletion | null>(null);
+  const [message, setMessage] = useState<T | null>(null);

   useEffect(() => {
     if (!channel) return;

@@ -21,7 +20,7 @@ export default function useSocket<T>(channel?: string | null) {
       socketRef.current?.emit("join", channel);

       // Listen for 'message' events
-      socketRef.current?.on("message", (message: ChatCompletion) => {
+      socketRef.current?.on("message", (message: T) => {
         setMessage(message);
       });
     });
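Making useSocket generic removes the last OpenAI-specific type from the hook; each call site now declares the payload shape it expects from its socket channel. A hypothetical call site (the channel and payload names are placeholders, not part of this diff):

    const streamedMessage = useSocket<ChatCompletion>(cell.streamingChannel);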

src/utils/utils.ts Normal file

@@ -0,0 +1 @@
+export const truthyFilter = <T>(x: T | null | undefined): x is T => Boolean(x);
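truthyFilter is a user-defined type guard, so Array.prototype.filter can narrow element types instead of returning (T | null | undefined)[]. One caveat: because it tests Boolean(x), it also drops other falsy values like 0 and "", even though the signature only promises removing null and undefined. Example:

    const ids = [1, null, 2, undefined].filter(truthyFilter); // inferred as number[]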

vitest.config.ts Normal file

@@ -0,0 +1,11 @@
+import tsconfigPaths from "vite-tsconfig-paths";
+import { configDefaults, defineConfig, type UserConfig } from "vitest/config";
+
+const config = defineConfig({
+  test: {
+    ...configDefaults, // Extending Vitest's default options
+  },
+  plugins: [tsconfigPaths()],
+}) as UserConfig;
+
+export default config;
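One possible follow-up (an assumption, not in this diff): the --no-threads requirement called out in parseConstructFn.test.ts could be pinned in this config instead of on the CLI, assuming the Vitest version in use supports the threads option:

    test: {
      ...configDefaults,
      threads: false, // isolated-vm appears to misbehave under worker threads
    },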