Prep for more model providers

Adds a `modelProvider` field to `promptVariants`; for now it's set to "openai/ChatCompletion" for all variants.

Adds a `modelProviders/` directory where we can define and store pluggable model providers. Currently just OpenAI. Not everything is pluggable yet -- notably, the code that actually generates completions hasn't been migrated to this setup.

Does a lot of work to get the types working. Prompts are now defined with a function call `definePrompt(modelProvider, config)` instead of an assignment `prompt = config`. Adds a script to migrate old prompt definitions.
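
For illustration, the change looks like this inside a variant's `constructFn` (a minimal sketch based on the seed data in this diff; the message content is made up):

```ts
// Before: the prompt was a plain assignment.
prompt = {
  model: "gpt-3.5-turbo-0613",
  messages: [{ role: "user", content: "Hello" }], // illustrative message
  temperature: 0,
};

// After: the prompt is a typed call, keyed by model provider.
definePrompt("openai/ChatCompletion", {
  model: "gpt-3.5-turbo-0613",
  messages: [{ role: "user", content: "Hello" }],
  temperature: 0,
});
```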

This is still partial work, but the diff is large enough that I want to get it in. I don't think anything is broken, but I haven't tested thoroughly.
Kyle Corbitt
2023-07-20 14:47:39 -07:00
parent 2c8c8d07cf
commit ded6678e97
43 changed files with 1195 additions and 3023 deletions

View File

@@ -19,7 +19,6 @@ FROM base as builder
# Include all NEXT_PUBLIC_* env vars here
ARG NEXT_PUBLIC_POSTHOG_KEY
ARG NEXT_PUBLIC_IS_PUBLIC_PLAYGROUND
ARG NEXT_PUBLIC_SOCKET_URL
WORKDIR /app

View File

@@ -21,6 +21,7 @@
"check": "concurrently 'pnpm lint' 'pnpm tsc' 'pnpm prettier . --check'"
},
"dependencies": {
"@apidevtools/json-schema-ref-parser": "^10.1.0",
"@babel/preset-typescript": "^7.22.5",
"@babel/standalone": "^7.22.9",
"@chakra-ui/next-js": "^2.1.4",
@@ -39,6 +40,7 @@
"@trpc/next": "^10.26.0",
"@trpc/react-query": "^10.26.0",
"@trpc/server": "^10.26.0",
"ast-types": "^0.14.2",
"chroma-js": "^2.4.2",
"concurrently": "^8.2.0",
"cors": "^2.8.5",
@@ -51,7 +53,9 @@
"graphile-worker": "^0.13.0",
"immer": "^10.0.2",
"isolated-vm": "^4.5.0",
"json-schema-to-typescript": "^13.0.2",
"json-stringify-pretty-compact": "^4.0.0",
"jsonschema": "^1.4.1",
"lodash-es": "^4.17.21",
"next": "^13.4.2",
"next-auth": "^4.22.1",
@@ -68,10 +72,13 @@
"react-select": "^5.7.4",
"react-syntax-highlighter": "^15.5.0",
"react-textarea-autosize": "^8.5.0",
"recast": "^0.23.3",
"socket.io": "^4.7.1",
"socket.io-client": "^4.7.1",
"superjson": "1.12.2",
"tsx": "^3.12.7",
"type-fest": "^4.0.0",
"vite-tsconfig-paths": "^4.2.0",
"zod": "^3.21.4",
"zustand": "^4.3.9"
},
@@ -83,6 +90,7 @@
"@types/cors": "^2.8.13",
"@types/eslint": "^8.37.0",
"@types/express": "^4.17.17",
"@types/json-schema": "^7.0.12",
"@types/lodash-es": "^4.17.8",
"@types/node": "^18.16.0",
"@types/pluralize": "^0.0.30",

pnpm-lock.yaml (generated; 400 lines changed)
View File

@@ -5,6 +5,9 @@ settings:
excludeLinksFromLockfile: false
dependencies:
'@apidevtools/json-schema-ref-parser':
specifier: ^10.1.0
version: 10.1.0
'@babel/preset-typescript':
specifier: ^7.22.5
version: 7.22.5(@babel/core@7.22.9)
@@ -59,6 +62,9 @@ dependencies:
'@trpc/server':
specifier: ^10.26.0
version: 10.26.0
ast-types:
specifier: ^0.14.2
version: 0.14.2
chroma-js:
specifier: ^2.4.2
version: 2.4.2
@@ -95,9 +101,15 @@ dependencies:
isolated-vm:
specifier: ^4.5.0
version: 4.5.0
json-schema-to-typescript:
specifier: ^13.0.2
version: 13.0.2
json-stringify-pretty-compact:
specifier: ^4.0.0
version: 4.0.0
jsonschema:
specifier: ^1.4.1
version: 1.4.1
lodash-es:
specifier: ^4.17.21
version: 4.17.21
@@ -146,6 +158,9 @@ dependencies:
react-textarea-autosize:
specifier: ^8.5.0
version: 8.5.0(@types/react@18.2.6)(react@18.2.0)
recast:
specifier: ^0.23.3
version: 0.23.3
socket.io:
specifier: ^4.7.1
version: 4.7.1
@@ -158,6 +173,12 @@ dependencies:
tsx:
specifier: ^3.12.7
version: 3.12.7
type-fest:
specifier: ^4.0.0
version: 4.0.0
vite-tsconfig-paths:
specifier: ^4.2.0
version: 4.2.0(typescript@5.0.4)
zod:
specifier: ^3.21.4
version: 3.21.4
@@ -187,6 +208,9 @@ devDependencies:
'@types/express':
specifier: ^4.17.17
version: 4.17.17
'@types/json-schema':
specifier: ^7.0.12
version: 7.0.12
'@types/lodash-es':
specifier: ^4.17.8
version: 4.17.8
@@ -259,6 +283,17 @@ packages:
'@jridgewell/gen-mapping': 0.3.3
'@jridgewell/trace-mapping': 0.3.18
/@apidevtools/json-schema-ref-parser@10.1.0:
resolution: {integrity: sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==}
engines: {node: '>= 16'}
dependencies:
'@jsdevtools/ono': 7.1.3
'@types/json-schema': 7.0.12
'@types/lodash.clonedeep': 4.5.7
js-yaml: 4.1.0
lodash.clonedeep: 4.5.0
dev: false
/@babel/code-frame@7.22.5:
resolution: {integrity: sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==}
engines: {node: '>=6.9.0'}
@@ -564,6 +599,16 @@ packages:
'@babel/helper-validator-identifier': 7.22.5
to-fast-properties: 2.0.0
/@bcherny/json-schema-ref-parser@10.0.5-fork:
resolution: {integrity: sha512-E/jKbPoca1tfUPj3iSbitDZTGnq6FUFjkH6L8U2oDwSuwK1WhnnVtCG7oFOTg/DDnyoXbQYUiUiGOibHqaGVnw==}
engines: {node: '>= 16'}
dependencies:
'@jsdevtools/ono': 7.1.3
'@types/json-schema': 7.0.12
call-me-maybe: 1.0.2
js-yaml: 4.1.0
dev: false
/@chakra-ui/accordion@2.2.0(@chakra-ui/system@2.5.8)(framer-motion@10.12.17)(react@18.2.0):
resolution: {integrity: sha512-2IK1iLzTZ22u8GKPPPn65mqJdZidn4AvkgAbv17ISdKA07VHJ8jSd4QF1T5iCXjKfZ0XaXozmhP4kDhjwF2IbQ==}
peerDependencies:
@@ -2385,6 +2430,10 @@ packages:
'@jridgewell/resolve-uri': 3.1.0
'@jridgewell/sourcemap-codec': 1.4.14
/@jsdevtools/ono@7.1.3:
resolution: {integrity: sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==}
dev: false
/@monaco-editor/loader@1.3.3(monaco-editor@0.40.0):
resolution: {integrity: sha512-6KKF4CTzcJiS8BJwtxtfyYt9shBiEv32ateQ9T4UVogwn4HM/uPo9iJd2Dmbkpz8CM6Y0PDUpjnZzCwC+eYo2Q==}
peerDependencies:
@@ -2806,6 +2855,13 @@ packages:
'@types/serve-static': 1.15.2
dev: true
/@types/glob@7.2.0:
resolution: {integrity: sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==}
dependencies:
'@types/minimatch': 5.1.2
'@types/node': 18.16.0
dev: false
/@types/hast@2.3.5:
resolution: {integrity: sha512-SvQi0L/lNpThgPoleH53cdjB3y9zpLlVjRbqB3rH8hx1jiRSBGAhyjV3H+URFjNVRqt2EdYNrbZE5IsGlNfpRg==}
dependencies:
@@ -2818,7 +2874,6 @@ packages:
/@types/json-schema@7.0.12:
resolution: {integrity: sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==}
dev: true
/@types/json5@0.0.29:
resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==}
@@ -2830,6 +2885,12 @@ packages:
'@types/lodash': 4.14.195
dev: true
/@types/lodash.clonedeep@4.5.7:
resolution: {integrity: sha512-ccNqkPptFIXrpVqUECi60/DFxjNKsfoQxSQsgcBJCX/fuX1wgyQieojkcWH/KpE3xzLoWN/2k+ZeGqIN3paSvw==}
dependencies:
'@types/lodash': 4.14.195
dev: false
/@types/lodash.mergewith@4.6.7:
resolution: {integrity: sha512-3m+lkO5CLRRYU0fhGRp7zbsGi6+BZj0uTVSwvcKU+nSlhjA9/QRNfuSGnD2mX6hQA7ZbmcCkzk5h4ZYGOtk14A==}
dependencies:
@@ -2847,6 +2908,10 @@ packages:
resolution: {integrity: sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==}
dev: true
/@types/minimatch@5.1.2:
resolution: {integrity: sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==}
dev: false
/@types/ms@0.7.31:
resolution: {integrity: sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==}
dev: false
@@ -2881,6 +2946,10 @@ packages:
resolution: {integrity: sha512-kVww6xZrW/db5BR9OqiT71J9huRdQ+z/r+LbDuT7/EK50mCmj5FoaIARnVv0rvjUS/YpDox0cDU9lpQT011VBA==}
dev: true
/@types/prettier@2.7.3:
resolution: {integrity: sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==}
dev: false
/@types/prismjs@1.26.0:
resolution: {integrity: sha512-ZTaqn/qSqUuAq1YwvOFQfVW1AR/oQJlLSZVustdjwI+GZ8kr0MSHBj0tsXPW1EqHubx50gtBEjbPGsdZwQwCjQ==}
dev: true
@@ -3332,6 +3401,10 @@ packages:
engines: {node: '>=10'}
dev: true
/any-promise@1.3.0:
resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==}
dev: false
/anymatch@3.1.3:
resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
engines: {node: '>= 8'}
@@ -3342,7 +3415,6 @@ packages:
/argparse@2.0.1:
resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
dev: true
/aria-hidden@1.2.3:
resolution: {integrity: sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==}
@@ -3426,6 +3498,15 @@ packages:
is-shared-array-buffer: 1.0.2
dev: true
/assert@2.0.0:
resolution: {integrity: sha512-se5Cd+js9dXJnu6Ag2JFc00t+HmHOen+8Q+L7O9zI0PqQXr20uk2J0XQqMxZEeo5U50o8Nvmmx7dZrl+Ufr35A==}
dependencies:
es6-object-assign: 1.1.0
is-nan: 1.3.2
object-is: 1.1.5
util: 0.12.5
dev: false
/assertion-error@1.1.0:
resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==}
dev: true
@@ -3434,6 +3515,20 @@ packages:
resolution: {integrity: sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==}
dev: true
/ast-types@0.14.2:
resolution: {integrity: sha512-O0yuUDnZeQDL+ncNGlJ78BiO4jnYI3bvMsD5prT0/nsgijG/LpNBIr63gTjVTNsiGkgQhiyCShTgxt8oXOrklA==}
engines: {node: '>=4'}
dependencies:
tslib: 2.6.0
dev: false
/ast-types@0.16.1:
resolution: {integrity: sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==}
engines: {node: '>=4'}
dependencies:
tslib: 2.6.0
dev: false
/asynckit@0.4.0:
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
dev: false
@@ -3441,7 +3536,6 @@ packages:
/available-typed-arrays@1.0.5:
resolution: {integrity: sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==}
engines: {node: '>= 0.4'}
dev: true
/axe-core@4.7.2:
resolution: {integrity: sha512-zIURGIS1E1Q4pcrMjp+nnEh+16G56eG/MUllJH8yEvw7asDo7Ac9uhC9KIH5jzpITueEZolfYglnCGIuSBz39g==}
@@ -3492,7 +3586,6 @@ packages:
/balanced-match@1.0.2:
resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
dev: true
/base-64@0.1.0:
resolution: {integrity: sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==}
@@ -3553,7 +3646,6 @@ packages:
dependencies:
balanced-match: 1.0.2
concat-map: 0.0.1
dev: true
/braces@3.0.2:
resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
@@ -3612,6 +3704,10 @@ packages:
function-bind: 1.1.1
get-intrinsic: 1.2.1
/call-me-maybe@1.0.2:
resolution: {integrity: sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==}
dev: false
/callsites@3.1.0:
resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
engines: {node: '>=6'}
@@ -3695,6 +3791,17 @@ packages:
resolution: {integrity: sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==}
dev: false
/cli-color@2.0.3:
resolution: {integrity: sha512-OkoZnxyC4ERN3zLzZaY9Emb7f/MhBOIpePv0Ycok0fJYT+Ouo00UBEIwsVsr0yoow++n5YWlSUgST9GKhNHiRQ==}
engines: {node: '>=0.10'}
dependencies:
d: 1.0.1
es5-ext: 0.10.62
es6-iterator: 2.0.3
memoizee: 0.4.15
timers-ext: 0.1.7
dev: false
/client-only@0.0.1:
resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==}
dev: false
@@ -3758,7 +3865,6 @@ packages:
/concat-map@0.0.1:
resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
dev: true
/concurrently@8.2.0:
resolution: {integrity: sha512-nnLMxO2LU492mTUj9qX/az/lESonSZu81UznYDoXtz1IQf996ixVqPAgHXwvHiHCAef/7S8HIK+fTFK7Ifk8YA==}
@@ -3887,6 +3993,13 @@ packages:
/csstype@3.1.2:
resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==}
/d@1.0.1:
resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==}
dependencies:
es5-ext: 0.10.62
type: 1.2.0
dev: false
/damerau-levenshtein@1.0.8:
resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==}
dev: true
@@ -3985,7 +4098,6 @@ packages:
dependencies:
has-property-descriptors: 1.0.0
object-keys: 1.1.1
dev: true
/delayed-stream@1.0.0:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
@@ -4225,6 +4337,44 @@ packages:
is-symbol: 1.0.4
dev: true
/es5-ext@0.10.62:
resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==}
engines: {node: '>=0.10'}
requiresBuild: true
dependencies:
es6-iterator: 2.0.3
es6-symbol: 3.1.3
next-tick: 1.1.0
dev: false
/es6-iterator@2.0.3:
resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==}
dependencies:
d: 1.0.1
es5-ext: 0.10.62
es6-symbol: 3.1.3
dev: false
/es6-object-assign@1.1.0:
resolution: {integrity: sha512-MEl9uirslVwqQU369iHNWZXsI8yaZYGg/D65aOgZkeyFJwHYSxilf7rQzXKI7DdDuBPrBXbfk3sl9hJhmd5AUw==}
dev: false
/es6-symbol@3.1.3:
resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==}
dependencies:
d: 1.0.1
ext: 1.7.0
dev: false
/es6-weak-map@2.0.3:
resolution: {integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==}
dependencies:
d: 1.0.1
es5-ext: 0.10.62
es6-iterator: 2.0.3
es6-symbol: 3.1.3
dev: false
/esbuild@0.17.19:
resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==}
engines: {node: '>=12'}
@@ -4580,6 +4730,12 @@ packages:
eslint-visitor-keys: 3.4.1
dev: true
/esprima@4.0.1:
resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==}
engines: {node: '>=4'}
hasBin: true
dev: false
/esquery@1.5.0:
resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==}
engines: {node: '>=0.10'}
@@ -4614,6 +4770,13 @@ packages:
engines: {node: '>= 0.6'}
dev: false
/event-emitter@0.3.5:
resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==}
dependencies:
d: 1.0.1
es5-ext: 0.10.62
dev: false
/event-target-shim@5.0.1:
resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==}
engines: {node: '>=6'}
@@ -4693,6 +4856,12 @@ packages:
- supports-color
dev: false
/ext@1.7.0:
resolution: {integrity: sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==}
dependencies:
type: 2.7.2
dev: false
/fast-deep-equal@3.1.3:
resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
dev: true
@@ -4795,7 +4964,6 @@ packages:
resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==}
dependencies:
is-callable: 1.2.7
dev: true
/form-data-encoder@1.7.2:
resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==}
@@ -4859,7 +5027,6 @@ packages:
/fs.realpath@1.0.0:
resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
dev: true
/fsevents@2.3.2:
resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
@@ -4911,6 +5078,11 @@ packages:
engines: {node: '>=6'}
dev: false
/get-stdin@8.0.0:
resolution: {integrity: sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==}
engines: {node: '>=10'}
dev: false
/get-stream@6.0.1:
resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==}
engines: {node: '>=10'}
@@ -4942,6 +5114,16 @@ packages:
is-glob: 4.0.3
dev: true
/glob-promise@4.2.2(glob@7.2.3):
resolution: {integrity: sha512-xcUzJ8NWN5bktoTIX7eOclO1Npxd/dyVqUJxlLIDasT4C7KZyqlPIwkdJ0Ypiy3p2ZKahTjK4M9uC3sNSfNMzw==}
engines: {node: '>=12'}
peerDependencies:
glob: ^7.1.6
dependencies:
'@types/glob': 7.2.0
glob: 7.2.3
dev: false
/glob-to-regexp@0.4.1:
resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==}
dev: true
@@ -4966,7 +5148,6 @@ packages:
minimatch: 3.1.2
once: 1.4.0
path-is-absolute: 1.0.1
dev: true
/globals@11.12.0:
resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==}
@@ -5015,13 +5196,11 @@ packages:
/globrex@0.1.2:
resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==}
dev: true
/gopd@1.0.1:
resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==}
dependencies:
get-intrinsic: 1.2.1
dev: true
/gpt-tokens@1.0.10:
resolution: {integrity: sha512-DNWfqhu+ZAbjTUT76Xc5UBE+e7L0WejsrbiJy+/zgvA2C4697OFN6TLfQY7zaWlay8bNUKqLzbStz0VI0thDtQ==}
@@ -5072,7 +5251,6 @@ packages:
resolution: {integrity: sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==}
dependencies:
get-intrinsic: 1.2.1
dev: true
/has-proto@1.0.1:
resolution: {integrity: sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==}
@@ -5087,7 +5265,6 @@ packages:
engines: {node: '>= 0.4'}
dependencies:
has-symbols: 1.0.3
dev: true
/has@1.0.3:
resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==}
@@ -5190,7 +5367,6 @@ packages:
dependencies:
once: 1.4.0
wrappy: 1.0.2
dev: true
/inherits@2.0.4:
resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
@@ -5226,6 +5402,14 @@ packages:
is-decimal: 1.0.4
dev: false
/is-arguments@1.1.1:
resolution: {integrity: sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==}
engines: {node: '>= 0.4'}
dependencies:
call-bind: 1.0.2
has-tostringtag: 1.0.0
dev: false
/is-array-buffer@3.0.2:
resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==}
dependencies:
@@ -5266,7 +5450,6 @@ packages:
/is-callable@1.2.7:
resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==}
engines: {node: '>= 0.4'}
dev: true
/is-core-module@2.12.1:
resolution: {integrity: sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==}
@@ -5305,6 +5488,13 @@ packages:
engines: {node: '>=8'}
dev: false
/is-generator-function@1.0.10:
resolution: {integrity: sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==}
engines: {node: '>= 0.4'}
dependencies:
has-tostringtag: 1.0.0
dev: false
/is-glob@4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
@@ -5323,6 +5513,14 @@ packages:
is-docker: 3.0.0
dev: true
/is-nan@1.3.2:
resolution: {integrity: sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==}
engines: {node: '>= 0.4'}
dependencies:
call-bind: 1.0.2
define-properties: 1.2.0
dev: false
/is-negative-zero@2.0.2:
resolution: {integrity: sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==}
engines: {node: '>= 0.4'}
@@ -5344,6 +5542,10 @@ packages:
engines: {node: '>=8'}
dev: true
/is-promise@2.2.2:
resolution: {integrity: sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==}
dev: false
/is-regex@1.1.4:
resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==}
engines: {node: '>= 0.4'}
@@ -5387,7 +5589,6 @@ packages:
engines: {node: '>= 0.4'}
dependencies:
which-typed-array: 1.1.11
dev: true
/is-weakref@1.0.2:
resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==}
@@ -5460,7 +5661,6 @@ packages:
hasBin: true
dependencies:
argparse: 2.0.1
dev: true
/jsesc@2.5.2:
resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==}
@@ -5470,6 +5670,27 @@ packages:
/json-parse-even-better-errors@2.3.1:
resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
/json-schema-to-typescript@13.0.2:
resolution: {integrity: sha512-TCaEVW4aI2FmMQe7f98mvr3/oiVmXEC1xZjkTZ9L/BSoTXFlC7p64mD5AD2d8XWycNBQZUnHwXL5iVXt1HWwNQ==}
engines: {node: '>=12.0.0'}
hasBin: true
dependencies:
'@bcherny/json-schema-ref-parser': 10.0.5-fork
'@types/json-schema': 7.0.12
'@types/lodash': 4.14.195
'@types/prettier': 2.7.3
cli-color: 2.0.3
get-stdin: 8.0.0
glob: 7.2.3
glob-promise: 4.2.2(glob@7.2.3)
is-glob: 4.0.3
lodash: 4.17.21
minimist: 1.2.8
mkdirp: 1.0.4
mz: 2.7.0
prettier: 2.8.8
dev: false
/json-schema-traverse@0.4.1:
resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
dev: true
@@ -5498,6 +5719,10 @@ packages:
resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==}
dev: true
/jsonschema@1.4.1:
resolution: {integrity: sha512-S6cATIPVv1z0IlxdN+zUk5EPjkGCdnhN4wVSBlvoUO1tOLJootbo9CquNJmbIh4yikWHiUedhRYrNPn1arpEmQ==}
dev: false
/jsx-ast-utils@3.3.4:
resolution: {integrity: sha512-fX2TVdCViod6HwKEtSWGHs57oFhVfCMwieb9PuRDgjDPh5XeqJiHFFFJCHxU5cnTc3Bu/GRL+kPiFmw8XWOfKw==}
engines: {node: '>=4.0'}
@@ -5560,6 +5785,10 @@ packages:
resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==}
dev: false
/lodash.clonedeep@4.5.0:
resolution: {integrity: sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==}
dev: false
/lodash.merge@4.6.2:
resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
dev: true
@@ -5602,6 +5831,12 @@ packages:
dependencies:
yallist: 4.0.0
/lru-queue@0.1.0:
resolution: {integrity: sha512-BpdYkt9EvGl8OfWHDQPISVpcl5xZthb+XPsbELj5AQXxIC8IriDZIQYjBJPEm5rS420sjZ0TLEzRcq5KdBhYrQ==}
dependencies:
es5-ext: 0.10.62
dev: false
/magic-string@0.30.1:
resolution: {integrity: sha512-mbVKXPmS0z0G4XqFDCTllmDQ6coZzn94aMlb0o/A4HEHJCKcanlDZwYJgwnkmgD3jyWhUgj9VsPrfd972yPffA==}
engines: {node: '>=12'}
@@ -5630,6 +5865,19 @@ packages:
resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==}
dev: false
/memoizee@0.4.15:
resolution: {integrity: sha512-UBWmJpLZd5STPm7PMUlOw/TSy972M+z8gcyQ5veOnSDRREz/0bmpyTfKt3/51DhEBqCZQn1udM/5flcSPYhkdQ==}
dependencies:
d: 1.0.1
es5-ext: 0.10.62
es6-weak-map: 2.0.3
event-emitter: 0.3.5
is-promise: 2.2.2
lru-queue: 0.1.0
next-tick: 1.1.0
timers-ext: 0.1.7
dev: false
/merge-descriptors@1.0.1:
resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==}
dev: false
@@ -5692,11 +5940,16 @@ packages:
resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
dependencies:
brace-expansion: 1.1.11
dev: true
/minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
/mkdirp@1.0.4:
resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==}
engines: {node: '>=10'}
hasBin: true
dev: false
/mlly@1.4.0:
resolution: {integrity: sha512-ua8PAThnTwpprIaU47EPeZ/bPUVp2QYBbWMphUQpVdBI3Lgqzm5KZQ45Agm3YJedHXaIHl6pBGabaLSUPPSptg==}
dependencies:
@@ -5726,6 +5979,14 @@ packages:
object-assign: 4.1.1
dev: false
/mz@2.7.0:
resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
dependencies:
any-promise: 1.3.0
object-assign: 4.1.1
thenify-all: 1.6.0
dev: false
/nanoid@3.3.6:
resolution: {integrity: sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==}
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
@@ -5773,6 +6034,10 @@ packages:
uuid: 8.3.2
dev: false
/next-tick@1.1.0:
resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==}
dev: false
/next@13.4.2(@babel/core@7.22.9)(react-dom@18.2.0)(react@18.2.0):
resolution: {integrity: sha512-aNFqLs3a3nTGvLWlO9SUhCuMUHVPSFQC0+tDNGAsDXqx+WJDFSbvc233gOJ5H19SBc7nw36A9LwQepOJ2u/8Kg==}
engines: {node: '>=16.8.0'}
@@ -5883,6 +6148,14 @@ packages:
/object-inspect@1.12.3:
resolution: {integrity: sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==}
/object-is@1.1.5:
resolution: {integrity: sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==}
engines: {node: '>= 0.4'}
dependencies:
call-bind: 1.0.2
define-properties: 1.2.0
dev: false
/object-keys@0.4.0:
resolution: {integrity: sha512-ncrLw+X55z7bkl5PnUvHwFK9FcGuFYo9gtjws2XtSzL+aZ8tm830P60WJ0dSmFVaSalWieW5MD7kEdnXda9yJw==}
dev: false
@@ -5890,7 +6163,6 @@ packages:
/object-keys@1.1.1:
resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==}
engines: {node: '>= 0.4'}
dev: true
/object.assign@4.1.4:
resolution: {integrity: sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==}
@@ -5956,7 +6228,6 @@ packages:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
dependencies:
wrappy: 1.0.2
dev: true
/onetime@5.1.2:
resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
@@ -6111,7 +6382,6 @@ packages:
/path-is-absolute@1.0.1:
resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
engines: {node: '>=0.10.0'}
dev: true
/path-key@3.1.1:
resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
@@ -6338,7 +6608,6 @@ packages:
resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==}
engines: {node: '>=10.13.0'}
hasBin: true
dev: true
/prettier@3.0.0:
resolution: {integrity: sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==}
@@ -6688,6 +6957,17 @@ packages:
picomatch: 2.3.1
dev: false
/recast@0.23.3:
resolution: {integrity: sha512-HbCVFh2ANP6a09nzD4lx7XthsxMOJWKX5pIcUwtLrmeEIl3I0DwjCoVXDE0Aobk+7k/mS3H50FK4iuYArpcT6Q==}
engines: {node: '>= 4'}
dependencies:
assert: 2.0.0
ast-types: 0.16.1
esprima: 4.0.1
source-map: 0.6.1
tslib: 2.6.0
dev: false
/refractor@3.6.0:
resolution: {integrity: sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==}
dependencies:
@@ -7216,6 +7496,19 @@ packages:
resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
dev: true
/thenify-all@1.6.0:
resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==}
engines: {node: '>=0.8'}
dependencies:
thenify: 3.3.1
dev: false
/thenify@3.3.1:
resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
dependencies:
any-promise: 1.3.0
dev: false
/through2@0.4.2:
resolution: {integrity: sha512-45Llu+EwHKtAZYTPPVn3XZHBgakWMN3rokhEv5hu596XP+cNgplMg+Gj+1nmAvj+L0K7+N49zBKx5rah5u0QIQ==}
dependencies:
@@ -7227,6 +7520,13 @@ packages:
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
dev: false
/timers-ext@0.1.7:
resolution: {integrity: sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==}
dependencies:
es5-ext: 0.10.62
next-tick: 1.1.0
dev: false
/tiny-glob@0.2.9:
resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==}
dependencies:
@@ -7285,6 +7585,19 @@ packages:
hasBin: true
dev: false
/tsconfck@2.1.2(typescript@5.0.4):
resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==}
engines: {node: ^14.13.1 || ^16 || >=18}
hasBin: true
peerDependencies:
typescript: ^4.3.5 || ^5.0.0
peerDependenciesMeta:
typescript:
optional: true
dependencies:
typescript: 5.0.4
dev: false
/tsconfig-paths@3.14.2:
resolution: {integrity: sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==}
dependencies:
@@ -7343,6 +7656,11 @@ packages:
engines: {node: '>=10'}
dev: true
/type-fest@4.0.0:
resolution: {integrity: sha512-d/oYtUnPM9zar2fqqGLYPzgcY0qUlYK0evgNVti93xpzfjGkMgZHu9Lvgrkn0rqGXTgsFRxFamzjGoD9Uo+dgw==}
engines: {node: '>=16'}
dev: false
/type-is@1.6.18:
resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==}
engines: {node: '>= 0.6'}
@@ -7351,6 +7669,14 @@ packages:
mime-types: 2.1.35
dev: false
/type@1.2.0:
resolution: {integrity: sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==}
dev: false
/type@2.7.2:
resolution: {integrity: sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==}
dev: false
/typed-array-buffer@1.0.0:
resolution: {integrity: sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==}
engines: {node: '>= 0.4'}
@@ -7518,6 +7844,16 @@ packages:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
dev: false
/util@0.12.5:
resolution: {integrity: sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==}
dependencies:
inherits: 2.0.4
is-arguments: 1.1.1
is-generator-function: 1.0.10
is-typed-array: 1.1.12
which-typed-array: 1.1.11
dev: false
/utils-merge@1.0.1:
resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==}
engines: {node: '>= 0.4.0'}
@@ -7555,6 +7891,22 @@ packages:
- terser
dev: true
/vite-tsconfig-paths@4.2.0(typescript@5.0.4):
resolution: {integrity: sha512-jGpus0eUy5qbbMVGiTxCL1iB9ZGN6Bd37VGLJU39kTDD6ZfULTTb1bcc5IeTWqWJKiWV5YihCaibeASPiGi8kw==}
peerDependencies:
vite: '*'
peerDependenciesMeta:
vite:
optional: true
dependencies:
debug: 4.3.4
globrex: 0.1.2
tsconfck: 2.1.2(typescript@5.0.4)
transitivePeerDependencies:
- supports-color
- typescript
dev: false
/vite@4.4.4(@types/node@18.16.0):
resolution: {integrity: sha512-4mvsTxjkveWrKDJI70QmelfVqTm+ihFAb6+xf4sjEU2TmUCTlVX87tmg/QooPEMQb/lM9qGHT99ebqPziEd3wg==}
engines: {node: ^14.18.0 || >=16.0.0}
@@ -7744,7 +8096,6 @@ packages:
for-each: 0.3.3
gopd: 1.0.1
has-tostringtag: 1.0.0
dev: true
/which@2.0.2:
resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
@@ -7774,7 +8125,6 @@ packages:
/wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
dev: true
/ws@8.11.0:
resolution: {integrity: sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==}

View File

@@ -0,0 +1,17 @@
-- Add new columns allowing NULL values
ALTER TABLE "PromptVariant"
ADD COLUMN "constructFnVersion" INTEGER,
ADD COLUMN "modelProvider" TEXT;
-- Update existing records to have the default values
UPDATE "PromptVariant"
SET "constructFnVersion" = 1,
"modelProvider" = 'openai/ChatCompletion'
WHERE "constructFnVersion" IS NULL OR "modelProvider" IS NULL;
-- Alter table to set NOT NULL constraint
ALTER TABLE "PromptVariant"
ALTER COLUMN "constructFnVersion" SET NOT NULL,
ALTER COLUMN "modelProvider" SET NOT NULL;
ALTER TABLE "ScenarioVariantCell" ADD COLUMN "prompt" JSONB;

View File

@@ -33,7 +33,9 @@ model PromptVariant {
label String
constructFn String
constructFnVersion Int
model String
modelProvider String
uiId String @default(uuid()) @db.Uuid
visible Boolean @default(true)
@@ -98,6 +100,7 @@ model ScenarioVariantCell {
promptVariantId String @db.Uuid
promptVariant PromptVariant @relation(fields: [promptVariantId], references: [id], onDelete: Cascade)
prompt Json?
testScenarioId String @db.Uuid
testScenario TestScenario @relation(fields: [testScenarioId], references: [id], onDelete: Cascade)

View File

@@ -46,8 +46,10 @@ await prisma.promptVariant.createMany({
label: "Prompt Variant 1",
sortIndex: 0,
model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 1,
constructFn: dedent`
prompt = {
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
@@ -56,15 +58,17 @@ await prisma.promptVariant.createMany({
}
],
temperature: 0,
}`,
})`,
},
{
experimentId: defaultId,
label: "Prompt Variant 2",
sortIndex: 1,
model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 1,
constructFn: dedent`
prompt = {
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
@@ -73,7 +77,7 @@ await prisma.promptVariant.createMany({
}
],
temperature: 0,
}`,
})`,
},
],
});

View File

@@ -1,48 +0,0 @@
/* eslint-disable @typescript-eslint/no-var-requires */
import YAML from "yaml";
import fs from "fs";
import path from "path";
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
import assert from "assert";
import { type AcceptibleInputSchema } from "@openapi-contrib/openapi-schema-to-json-schema/dist/mjs/openapi-schema-types";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
const convertOpenApiToJsonSchema = async (url: string) => {
// Fetch the openapi document
const response = await fetch(url);
const openApiYaml = await response.text();
// Parse the yaml document
const openApiDocument = YAML.parse(openApiYaml) as AcceptibleInputSchema;
// Convert the openapi schema to json schema
const jsonSchema = openapiSchemaToJsonSchema(openApiDocument);
const modelProperty = jsonSchema.components.schemas.CreateChatCompletionRequest.properties.model;
assert(modelProperty.oneOf.length === 2, "Expected model to have oneOf length of 2");
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
modelProperty.oneOf = undefined;
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "openai.schema.json"),
JSON.stringify(jsonSchema, null, 2),
);
};
convertOpenApiToJsonSchema(OPENAPI_URL)
.then(() => console.log("JSON schema has been written successfully."))
.catch((err) => console.error(err));

View File

@@ -1,52 +0,0 @@
import fs from "fs";
import path from "path";
import openapiTS, { type OpenAPI3 } from "openapi-typescript";
import YAML from "yaml";
import { pick } from "lodash-es";
import assert from "assert";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
// Generate TypeScript types from OpenAPI
const schema = await fetch(OPENAPI_URL)
.then((res) => res.text())
.then((txt) => YAML.parse(txt) as OpenAPI3);
console.log(schema.components?.schemas?.CreateChatCompletionRequest);
// @ts-expect-error just assume this works, the assert will catch it if it doesn't
const modelProperty = schema.components?.schemas?.CreateChatCompletionRequest?.properties?.model;
assert(modelProperty.oneOf.length === 2, "Expected model to have oneOf length of 2");
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
modelProperty.oneOf = undefined;
delete schema["paths"];
assert(schema.components?.schemas);
schema.components.schemas = pick(schema.components?.schemas, [
"CreateChatCompletionRequest",
"ChatCompletionRequestMessage",
"ChatCompletionFunctions",
"ChatCompletionFunctionParameters",
]);
console.log(schema);
let openApiTypes = await openapiTS(schema);
// Remove the `export` from any line that starts with `export`
openApiTypes = openApiTypes.replaceAll("\nexport ", "\n");
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the TypeScript types. We only want to use this in our in-app editor, so
// save as a .txt so VS Code doesn't try to auto-import definitions from it.
fs.writeFileSync(path.join(currentDirectory, "openai.types.ts.txt"), openApiTypes);

File diff suppressed because it is too large

View File

@@ -1,148 +0,0 @@
/**
* This file was auto-generated by openapi-typescript.
* Do not make direct changes to the file.
*/
/** OneOf type helpers */
type Without<T, U> = { [P in Exclude<keyof T, keyof U>]?: never };
type XOR<T, U> = (T | U) extends object ? (Without<T, U> & U) | (Without<U, T> & T) : T | U;
type OneOf<T extends any[]> = T extends [infer Only] ? Only : T extends [infer A, infer B, ...infer Rest] ? OneOf<[XOR<A, B>, ...Rest]> : never;
type paths = Record<string, never>;
type webhooks = Record<string, never>;
interface components {
schemas: {
CreateChatCompletionRequest: {
/**
* @description ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
* @example gpt-3.5-turbo
* @enum {string}
*/
model: "gpt-4" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k-0613";
/** @description A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). */
messages: (components["schemas"]["ChatCompletionRequestMessage"])[];
/** @description A list of functions the model may generate JSON inputs for. */
functions?: (components["schemas"]["ChatCompletionFunctions"])[];
/** @description Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. */
function_call?: OneOf<["none" | "auto", {
/** @description The name of the function to call. */
name: string;
}]>;
/**
* @description What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
*
* We generally recommend altering this or `top_p` but not both.
*
* @default 1
* @example 1
*/
temperature?: number | null;
/**
* @description An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
*
* We generally recommend altering this or `temperature` but not both.
*
* @default 1
* @example 1
*/
top_p?: number | null;
/**
* @description How many chat completion choices to generate for each input message.
* @default 1
* @example 1
*/
n?: number | null;
/**
* @description If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
*
* @default false
*/
stream?: boolean | null;
/**
* @description Up to 4 sequences where the API will stop generating further tokens.
*
* @default null
*/
stop?: (string | null) | (string)[];
/**
* @description The maximum number of [tokens](/tokenizer) to generate in the chat completion.
*
* The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
*
* @default inf
*/
max_tokens?: number;
/**
* @description Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
*
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*
* @default 0
*/
presence_penalty?: number | null;
/**
* @description Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
*
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*
* @default 0
*/
frequency_penalty?: number | null;
/**
* @description Modify the likelihood of specified tokens appearing in the completion.
*
* Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
*
* @default null
*/
logit_bias?: Record<string, unknown> | null;
/**
* @description A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
*
* @example user-1234
*/
user?: string;
};
ChatCompletionRequestMessage: {
/**
* @description The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
* @enum {string}
*/
role: "system" | "user" | "assistant" | "function";
/** @description The contents of the message. `content` is required for all messages except assistant messages with function calls. */
content?: string;
/** @description The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. */
name?: string;
/** @description The name and arguments of a function that should be called, as generated by the model. */
function_call?: {
/** @description The name of the function to call. */
name?: string;
/** @description The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. */
arguments?: string;
};
};
ChatCompletionFunctions: {
/** @description The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */
name: string;
/** @description The description of what the function does. */
description?: string;
parameters?: components["schemas"]["ChatCompletionFunctionParameters"];
};
/** @description The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. */
ChatCompletionFunctionParameters: {
[key: string]: unknown;
};
};
responses: never;
parameters: never;
requestBodies: never;
headers: never;
pathItems: never;
}
type external = Record<string, never>;
type operations = Record<string, never>;

View File

@@ -1,6 +0,0 @@
{
"compilerOptions": {
"target": "esnext",
"moduleResolution": "nodenext"
}
}

View File

@@ -48,13 +48,13 @@ export default function VariantEditor(props: { variant: PromptVariant }) {
if (!model) return;
// Make sure the user defined the prompt with a `definePrompt(` call somewhere
const promptRegex = /prompt\s*=/;
const promptRegex = /definePrompt\(/;
if (!promptRegex.test(currentFn)) {
console.log("no prompt");
console.log(currentFn);
toast({
title: "Missing prompt",
description: "Please define the prompt (eg. `prompt = { ...`).",
description: "Please define the prompt (eg. `definePrompt(...`",
status: "error",
});
return;

View File

@@ -25,6 +25,7 @@ import CompareFunctions from "./CompareFunctions";
import { CustomInstructionsInput } from "./CustomInstructionsInput";
import { type RefineOptionLabel, refineOptions } from "./refineOptions";
import { RefineOption } from "./RefineOption";
import { isObject, isString } from "lodash-es";
export const RefinePromptModal = ({
variant,
@@ -59,7 +60,12 @@ export const RefinePromptModal = ({
const replaceVariantMutation = api.promptVariants.replaceVariant.useMutation();
const [replaceVariant, replacementInProgress] = useHandledAsyncCallback(async () => {
if (!variant.experimentId || !refinedPromptFn) return;
if (
!variant.experimentId ||
!refinedPromptFn ||
(isObject(refinedPromptFn) && "status" in refinedPromptFn)
)
return;
await replaceVariantMutation.mutateAsync({
id: variant.id,
constructFn: refinedPromptFn,
@@ -110,7 +116,7 @@ export const RefinePromptModal = ({
</VStack>
<CompareFunctions
originalFunction={variant.constructFn}
newFunction={refinedPromptFn}
newFunction={isString(refinedPromptFn) ? refinedPromptFn : undefined}
/>
</VStack>
</ModalBody>

View File

@@ -7,7 +7,7 @@ import {
SimpleGrid,
Link,
} from "@chakra-ui/react";
import { modelStats } from "~/server/modelStats";
import { modelStats } from "~/modelProviders/modelStats";
import { type SupportedModel } from "~/server/types";
export const ModelStatsCard = ({ label, model }: { label: string; model: SupportedModel }) => {

View File

@@ -0,0 +1,36 @@
import { type JSONSchema4Object } from "json-schema";
import modelProviders from ".";
import { compile } from "json-schema-to-typescript";
import dedent from "dedent";
export default async function generateTypes() {
const combinedSchema = {
type: "object",
properties: {} as Record<string, JSONSchema4Object>,
};
Object.entries(modelProviders).forEach(([id, provider]) => {
combinedSchema.properties[id] = provider.inputSchema;
});
const promptTypes = (
await compile(combinedSchema as JSONSchema4Object, "PromptTypes", {
additionalProperties: false,
bannerComment: dedent`
/**
* This type map defines the input types for each model provider.
*/
`,
})
).replace(/export interface PromptTypes/g, "interface PromptTypes");
return dedent`
${promptTypes}
declare function definePrompt<T extends keyof PromptTypes>(modelProvider: T, input: PromptTypes[T])
`;
}

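The string returned by `generateTypes()` is presumably fed to the in-app Monaco editor so `definePrompt` calls get typechecked as you edit. A sketch of what that registration might look like -- `addExtraLib` is Monaco's real API, but the module path and wiring here are assumed:

```ts
import * as monaco from "monaco-editor";
import generateTypes from "~/modelProviders/generateTypes"; // module path assumed

// Sketch: register the generated declarations as an ambient lib so the
// in-editor TypeScript service recognizes definePrompt() and its input types.
export async function registerPromptTypes() {
  const types = await generateTypes();
  monaco.languages.typescript.typescriptDefaults.addExtraLib(
    types,
    "file:///generated/definePrompt.d.ts", // any unique virtual URI works
  );
}
```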
View File

@@ -0,0 +1,7 @@
import openaiChatCompletion from "./openai-ChatCompletion";
const modelProviders = {
"openai/ChatCompletion": openaiChatCompletion,
} as const;
export default modelProviders;

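The registry is just a keyed object, so a second provider -- hypothetical here, nothing like it exists in this commit -- would be one import and one entry:

```ts
import openaiChatCompletion from "./openai-ChatCompletion";
import anthropicCompletion from "./anthropic-Completion"; // hypothetical future module

const modelProviders = {
  "openai/ChatCompletion": openaiChatCompletion,
  "anthropic/Completion": anthropicCompletion, // hypothetical
} as const;

// `as const` keeps the keys literal, so this union stays in sync automatically:
export type ModelProviderId = keyof typeof modelProviders;

export default modelProviders;
```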
View File

@@ -1,4 +1,4 @@
import { type SupportedModel } from "./types";
import { type SupportedModel } from "../server/types";
interface ModelStats {
contextLength: number;

View File

@@ -0,0 +1,69 @@
/* eslint-disable @typescript-eslint/no-var-requires */
import YAML from "yaml";
import fs from "fs";
import path from "path";
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
import $RefParser from "@apidevtools/json-schema-ref-parser";
import { type JSONObject } from "superjson/dist/types";
import assert from "assert";
import { type JSONSchema4Object } from "json-schema";
import { isObject } from "lodash-es";
// @ts-expect-error for some reason missing from types
import parserEstree from "prettier/plugins/estree";
import parserBabel from "prettier/plugins/babel";
import prettier from "prettier/standalone";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
// Fetch the openapi document
const response = await fetch(OPENAPI_URL);
const openApiYaml = await response.text();
// Parse the yaml document
let schema = YAML.parse(openApiYaml) as JSONObject;
schema = openapiSchemaToJsonSchema(schema);
const jsonSchema = await $RefParser.dereference(schema);
assert("components" in jsonSchema);
const completionRequestSchema = jsonSchema.components.schemas
.CreateChatCompletionRequest as JSONSchema4Object;
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
assert(
"properties" in completionRequestSchema &&
isObject(completionRequestSchema.properties) &&
"model" in completionRequestSchema.properties &&
isObject(completionRequestSchema.properties.model),
);
const modelProperty = completionRequestSchema.properties.model;
assert(
"oneOf" in modelProperty &&
Array.isArray(modelProperty.oneOf) &&
modelProperty.oneOf.length === 2 &&
isObject(modelProperty.oneOf[1]) &&
"enum" in modelProperty.oneOf[1],
"Expected model to have oneOf length of 2",
);
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
delete modelProperty["oneOf"];
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "input.schema.json"),
await prettier.format(JSON.stringify(completionRequestSchema, null, 2), {
parser: "json",
plugins: [parserBabel, parserEstree],
}),
);

View File

@@ -0,0 +1,186 @@
{
"type": "object",
"properties": {
"model": {
"description": "ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.",
"example": "gpt-3.5-turbo",
"type": "string",
"enum": [
"gpt-4",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613"
]
},
"messages": {
"description": "A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"role": {
"type": "string",
"enum": ["system", "user", "assistant", "function"],
"description": "The role of the messages author. One of `system`, `user`, `assistant`, or `function`."
},
"content": {
"type": "string",
"description": "The contents of the message. `content` is required for all messages except assistant messages with function calls."
},
"name": {
"type": "string",
"description": "The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters."
},
"function_call": {
"type": "object",
"description": "The name and arguments of a function that should be called, as generated by the model.",
"properties": {
"name": {
"type": "string",
"description": "The name of the function to call."
},
"arguments": {
"type": "string",
"description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."
}
}
}
},
"required": ["role"]
}
},
"functions": {
"description": "A list of functions the model may generate JSON inputs for.",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."
},
"description": {
"type": "string",
"description": "The description of what the function does."
},
"parameters": {
"type": "object",
"description": "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.",
"additionalProperties": true
}
},
"required": ["name"]
}
},
"function_call": {
"description": "Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.",
"oneOf": [
{
"type": "string",
"enum": ["none", "auto"]
},
{
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the function to call."
}
},
"required": ["name"]
}
]
},
"temperature": {
"type": "number",
"minimum": 0,
"maximum": 2,
"default": 1,
"example": 1,
"nullable": true,
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n"
},
"top_p": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
"example": 1,
"nullable": true,
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n"
},
"n": {
"type": "integer",
"minimum": 1,
"maximum": 128,
"default": 1,
"example": 1,
"nullable": true,
"description": "How many chat completion choices to generate for each input message."
},
"stream": {
"description": "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).\n",
"type": "boolean",
"nullable": true,
"default": false
},
"stop": {
"description": "Up to 4 sequences where the API will stop generating further tokens.\n",
"default": null,
"oneOf": [
{
"type": "string",
"nullable": true
},
{
"type": "array",
"minItems": 1,
"maxItems": 4,
"items": {
"type": "string"
}
}
]
},
"max_tokens": {
"description": "The maximum number of [tokens](/tokenizer) to generate in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.\n",
"default": "inf",
"type": "integer"
},
"presence_penalty": {
"type": "number",
"default": 0,
"minimum": -2,
"maximum": 2,
"nullable": true,
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n"
},
"frequency_penalty": {
"type": "number",
"default": 0,
"minimum": -2,
"maximum": 2,
"nullable": true,
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n"
},
"logit_bias": {
"type": "object",
"x-oaiTypeLabel": "map",
"default": null,
"nullable": true,
"description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n"
},
"user": {
"type": "string",
"example": "user-1234",
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": ["model", "messages"]
}
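
Taken together, a request that exercises the `functions` and `function_call` fields above would look like this (the model name and weather function are illustrative assumptions, not from the diff):

// Minimal sketch of an input that validates against the schema above.
const exampleInput = {
  model: "gpt-3.5-turbo-0613",
  messages: [{ role: "user", content: "What's the weather in Boston?" }],
  functions: [
    {
      name: "get_current_weather",
      description: "Look up the current weather for a city.",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    },
  ],
  function_call: "auto",
  temperature: 0,
};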

View File

@@ -0,0 +1,54 @@
import { type JSONSchema4 } from "json-schema";
import { type ModelProvider } from "../types";
import inputSchema from "./codegen/input.schema.json";
import { type CompletionCreateParams } from "openai/resources/chat";
const supportedModels = [
"gpt-4-0613",
"gpt-4-32k-0613",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
] as const;
type SupportedModel = (typeof supportedModels)[number];
const modelProvider: ModelProvider<SupportedModel, CompletionCreateParams> = {
name: "OpenAI ChatCompletion",
models: {
"gpt-4-0613": {
name: "GPT-4",
learnMore: "https://openai.com/gpt-4",
},
"gpt-4-32k-0613": {
name: "GPT-4 32k",
learnMore: "https://openai.com/gpt-4",
},
"gpt-3.5-turbo-0613": {
name: "GPT-3.5 Turbo",
learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
},
"gpt-3.5-turbo-16k-0613": {
name: "GPT-3.5 Turbo 16k",
learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
},
},
getModel: (input) => {
if (supportedModels.includes(input.model as SupportedModel))
return input.model as SupportedModel;
const modelMaps: Record<string, SupportedModel> = {
"gpt-4": "gpt-4-0613",
"gpt-4-32k": "gpt-4-32k-0613",
"gpt-3.5-turbo": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
};
if (input.model in modelMaps) return modelMaps[input.model] as SupportedModel;
return null;
},
inputSchema: inputSchema as JSONSchema4,
shouldStream: (input) => input.stream ?? false,
};
export default modelProvider;
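
As a quick illustration of `getModel`'s alias resolution (the import path is an assumption):

import modelProvider from "~/modelProviders/openai-ChatCompletion"; // path assumed

modelProvider.getModel({ model: "gpt-4", messages: [] }); // "gpt-4-0613" via modelMaps
modelProvider.getModel({ model: "gpt-4-0613", messages: [] }); // "gpt-4-0613", already pinned
modelProvider.getModel({ model: "text-davinci-003", messages: [] }); // null, not supported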

View File

@@ -0,0 +1,14 @@
import { type JSONSchema4 } from "json-schema";
export type ModelProviderModel = {
name: string;
learnMore: string;
};
export type ModelProvider<SupportedModels extends string, InputSchema> = {
name: string;
models: Record<SupportedModels, ModelProviderModel>;
getModel: (input: InputSchema) => SupportedModels | null;
shouldStream: (input: InputSchema) => boolean;
inputSchema: JSONSchema4;
};
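
To gauge how pluggable this contract is, here's a hypothetical second provider conforming to the type; every name below is invented for illustration:

import { type JSONSchema4 } from "json-schema";
import { type ModelProvider } from "~/modelProviders/types"; // path assumed

type FakeInput = { model: string; stream?: boolean };

const fakeProvider: ModelProvider<"llama-2-70b-chat", FakeInput> = {
  name: "Fake Llama Provider",
  models: {
    "llama-2-70b-chat": { name: "LLaMA 2 70B Chat", learnMore: "https://example.com" },
  },
  getModel: (input) => (input.model === "llama-2-70b-chat" ? "llama-2-70b-chat" : null),
  shouldStream: (input) => input.stream ?? false,
  inputSchema: { type: "object", required: ["model"] } as JSONSchema4,
};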

View File

@@ -6,6 +6,7 @@ import Favicon from "~/components/Favicon";
import "~/utils/analytics";
import Head from "next/head";
import { ChakraThemeProvider } from "~/theme/ChakraThemeProvider";
import { SyncAppStore } from "~/state/sync";
const MyApp: AppType<{ session: Session | null }> = ({
Component,
@@ -20,6 +21,7 @@ const MyApp: AppType<{ session: Session | null }> = ({
/>
</Head>
<SessionProvider session={session}>
<SyncAppStore />
<Favicon />
<ChakraThemeProvider>
<Component {...pageProps} />

View File

@@ -49,6 +49,10 @@ const DeleteButton = () => {
onClose();
}, [mutation, experiment.data?.id, router]);
  useEffect(() => {
    useAppStore.getState().sharedVariantEditor.loadMonaco().catch(console.error);
  }, []); // load Monaco once on mount; the dependency array was missing
return (
<>
<Button

View File

@@ -10,6 +10,7 @@ import {
requireNothing,
} from "~/utils/accessControl";
import userOrg from "~/server/utils/userOrg";
import generateTypes from "~/modelProviders/generateTypes";
export const experimentsRouter = createTRPCRouter({
list: protectedProcedure.query(async ({ ctx }) => {
@@ -115,7 +116,7 @@ export const experimentsRouter = createTRPCRouter({
* variable.
*/
prompt = {
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
stream: true,
messages: [
@@ -124,8 +125,10 @@ export const experimentsRouter = createTRPCRouter({
content: \`"Return 'this is output for the scenario "${"$"}{scenario.text}"'\`,
},
],
};`,
});`,
model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 2,
},
}),
prisma.templateVariable.create({
@@ -174,4 +177,10 @@ export const experimentsRouter = createTRPCRouter({
},
});
}),
// Keeping these on `experiment` for now because we might want to limit the
// providers based on your account/experiment
promptTypes: publicProcedure.query(async () => {
return await generateTypes();
}),
});
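
`generateTypes` itself isn't in this excerpt; given the `json-schema-to-typescript` dependency added in package.json, a plausible sketch is:

// Hedged sketch only -- the real ~/modelProviders/generateTypes isn't shown here.
import { compile } from "json-schema-to-typescript";
import modelProviders from "~/modelProviders";

export default async function generateTypes(): Promise<string> {
  const perProvider = await Promise.all(
    Object.entries(modelProviders).map(([id, provider]) =>
      // e.g. "openai/ChatCompletion" -> "openaiChatCompletionInput" (naming assumed)
      compile(provider.inputSchema, id.replace(/\W+/g, "") + "Input", { bannerComment: "" }),
    ),
  );
  return perProvider.join("\n");
}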

View File

@@ -1,16 +1,15 @@
import { isObject } from "lodash-es";
import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { generateNewCell } from "~/server/utils/generateNewCell";
import { OpenAIChatModel, type SupportedModel } from "~/server/types";
import { constructPrompt } from "~/server/utils/constructPrompt";
import { type SupportedModel } from "~/server/types";
import userError from "~/server/utils/error";
import { recordExperimentUpdated } from "~/server/utils/recordExperimentUpdated";
import { reorderPromptVariants } from "~/server/utils/reorderPromptVariants";
import { type PromptVariant } from "@prisma/client";
import { deriveNewConstructFn } from "~/server/utils/deriveNewContructFn";
import { requireCanModifyExperiment, requireCanViewExperiment } from "~/utils/accessControl";
import parseConstructFn from "~/server/utils/parseConstructFn";
export const promptVariantsRouter = createTRPCRouter({
list: publicProcedure
@@ -198,7 +197,9 @@ export const promptVariantsRouter = createTRPCRouter({
label: newVariantLabel,
sortIndex: (originalVariant?.sortIndex ?? 0) + 1,
constructFn: newConstructFn,
constructFnVersion: 2,
model: originalVariant?.model ?? "gpt-3.5-turbo",
modelProvider: originalVariant?.modelProvider ?? "openai/ChatCompletion",
},
});
@@ -298,12 +299,15 @@ export const promptVariantsRouter = createTRPCRouter({
});
await requireCanModifyExperiment(existing.experimentId, ctx);
const constructedPrompt = await constructPrompt({ constructFn: existing.constructFn }, null);
const constructedPrompt = await parseConstructFn(existing.constructFn);
if ("error" in constructedPrompt) {
return userError(constructedPrompt.error);
}
const promptConstructionFn = await deriveNewConstructFn(
existing,
// @ts-expect-error TODO clean this up
constructedPrompt?.model as SupportedModel,
constructedPrompt.model as SupportedModel,
input.instructions,
);
@@ -332,25 +336,10 @@ export const promptVariantsRouter = createTRPCRouter({
throw new Error(`Prompt Variant with id ${input.id} does not exist`);
}
let model = existing.model;
try {
const contructedPrompt = await constructPrompt({ constructFn: input.constructFn }, null);
const parsedPrompt = await parseConstructFn(input.constructFn);
if (!isObject(contructedPrompt)) {
return userError("Prompt is not an object");
}
if (!("model" in contructedPrompt)) {
return userError("Prompt does not define a model");
}
if (
typeof contructedPrompt.model !== "string" ||
!(contructedPrompt.model in OpenAIChatModel)
) {
return userError("Prompt defines an invalid model");
}
model = contructedPrompt.model;
} catch (e) {
return userError((e as Error).message);
if ("error" in parsedPrompt) {
return userError(parsedPrompt.error);
}
// Create a duplicate with only the config changed
@@ -361,7 +350,9 @@ export const promptVariantsRouter = createTRPCRouter({
sortIndex: existing.sortIndex,
uiId: existing.uiId,
constructFn: input.constructFn,
model,
constructFnVersion: 2,
modelProvider: parsedPrompt.modelProvider,
model: parsedPrompt.model,
},
});

View File

@@ -0,0 +1,45 @@
import "dotenv/config";
import dedent from "dedent";
import { expect, test } from "vitest";
import { migrate1to2 } from "./migrateConstructFns";
test("migrate1to2", () => {
const constructFn = dedent`
// Test comment
prompt = {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: "What is the capital of China?"
}
]
}
`;
const migrated = migrate1to2(constructFn);
expect(migrated).toBe(dedent`
// Test comment
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: "What is the capital of China?"
}
]
})
`);
});

View File

@@ -0,0 +1,58 @@
import * as recast from "recast";
import { type ASTNode } from "ast-types";
import { prisma } from "../db";
import { fileURLToPath } from "url";
const { builders: b } = recast.types;
export const migrate1to2 = (fnBody: string): string => {
const ast: ASTNode = recast.parse(fnBody);
recast.visit(ast, {
visitAssignmentExpression(path) {
const node = path.node;
if ("name" in node.left && node.left.name === "prompt") {
const functionCall = b.callExpression(b.identifier("definePrompt"), [
b.literal("openai/ChatCompletion"),
node.right,
]);
path.replace(functionCall);
}
return false;
},
});
return recast.print(ast).code;
};
export default async function migrateConstructFns() {
const v1Prompts = await prisma.promptVariant.findMany({
where: {
constructFnVersion: 1,
},
});
console.log(`Migrating ${v1Prompts.length} prompts 1->2`);
await Promise.all(
v1Prompts.map(async (variant) => {
try {
await prisma.promptVariant.update({
where: {
id: variant.id,
},
data: {
constructFn: migrate1to2(variant.constructFn),
constructFnVersion: 2,
},
});
} catch (e) {
console.error("Error migrating constructFn for variant", variant.id, e);
}
}),
);
}
// If we're running this file directly, run the migration
if (process.argv.at(-1) === fileURLToPath(import.meta.url)) {
console.log("Running migration");
await migrateConstructFns();
console.log("Done");
}
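
A nice property of the AST-based rewrite: it's effectively idempotent, since already-migrated code contains no assignment to `prompt` for the visitor to match (behavior inferred from `visitAssignmentExpression` above):

migrate1to2(`definePrompt("openai/ChatCompletion", { model: "gpt-4-0613", messages: [] })`);
// -> returns the code unchanged; no `prompt = ...` assignment to rewrite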

View File

@@ -1,15 +1,15 @@
import crypto from "crypto";
import { prisma } from "~/server/db";
import defineTask from "./defineTask";
import { type CompletionResponse, getOpenAIChatCompletion } from "../utils/getCompletion";
import { type JSONSerializable } from "../types";
import { sleep } from "../utils/sleep";
import { shouldStream } from "../utils/shouldStream";
import { generateChannel } from "~/utils/generateChannel";
import { runEvalsForOutput } from "../utils/evaluations";
import { constructPrompt } from "../utils/constructPrompt";
import { type CompletionCreateParams } from "openai/resources/chat";
import { type Prisma } from "@prisma/client";
import parseConstructFn from "../utils/parseConstructFn";
import hashPrompt from "../utils/hashPrompt";
import { type JsonObject } from "type-fest";
import modelProviders from "~/modelProviders";
const MAX_AUTO_RETRIES = 10;
const MIN_DELAY = 500; // milliseconds
@@ -23,7 +23,7 @@ function calculateDelay(numPreviousTries: number): number {
const getCompletionWithRetries = async (
cellId: string,
payload: JSONSerializable,
payload: JsonObject,
channel?: string,
): Promise<CompletionResponse> => {
let modelResponse: CompletionResponse | null = null;
@@ -125,9 +125,23 @@ export const queryLLM = defineTask<queryLLMJob>("queryLLM", async (task) => {
return;
}
const prompt = await constructPrompt(variant, scenario.variableValues);
const prompt = await parseConstructFn(variant.constructFn, scenario.variableValues as JsonObject);
const streamingEnabled = shouldStream(prompt);
if ("error" in prompt) {
await prisma.scenarioVariantCell.update({
where: { id: scenarioVariantCellId },
data: {
statusCode: 400,
errorMessage: prompt.error,
retrievalStatus: "ERROR",
},
});
return;
}
const provider = modelProviders[prompt.modelProvider];
const streamingEnabled = provider.shouldStream(prompt.modelInput);
let streamingChannel;
if (streamingEnabled) {
@@ -135,21 +149,19 @@ export const queryLLM = defineTask<queryLLMJob>("queryLLM", async (task) => {
// Save streaming channel so that UI can connect to it
await prisma.scenarioVariantCell.update({
where: { id: scenarioVariantCellId },
data: {
streamingChannel,
},
data: { streamingChannel },
});
}
const modelResponse = await getCompletionWithRetries(
scenarioVariantCellId,
prompt,
prompt.modelInput as unknown as JsonObject,
streamingChannel,
);
let modelOutput = null;
if (modelResponse.statusCode === 200) {
const inputHash = crypto.createHash("sha256").update(JSON.stringify(prompt)).digest("hex");
const inputHash = hashPrompt(prompt);
modelOutput = await prisma.modelOutput.create({
data: {

View File

@@ -1,15 +0,0 @@
import { test } from "vitest";
import { constructPrompt } from "./constructPrompt";
test.skip("constructPrompt", async () => {
const constructed = await constructPrompt(
{
constructFn: `prompt = { "fooz": "bar" }`,
},
{
foo: "bar",
},
);
console.log(constructed);
});

View File

@@ -1,35 +0,0 @@
import { type PromptVariant, type TestScenario } from "@prisma/client";
import ivm from "isolated-vm";
import { type JSONSerializable } from "../types";
const isolate = new ivm.Isolate({ memoryLimit: 128 });
export async function constructPrompt(
variant: Pick<PromptVariant, "constructFn">,
scenario: TestScenario["variableValues"],
): Promise<JSONSerializable> {
const code = `
const scenario = ${JSON.stringify(scenario ?? {}, null, 2)};
let prompt = {};
${variant.constructFn}
global.prompt = prompt;
`;
console.log("code is", code);
const context = await isolate.createContext();
const jail = context.global;
await jail.set("global", jail.derefInto());
const script = await isolate.compileScript(code);
await script.run(context);
const promptReference = (await context.global.get("prompt")) as ivm.Reference;
const prompt = await promptReference.copy(); // Get the actual value from the isolate
return prompt as JSONSerializable;
}

View File

@@ -1,8 +1,9 @@
import crypto from "crypto";
import { type Prisma } from "@prisma/client";
import { prisma } from "../db";
import { queueLLMRetrievalTask } from "./queueLLMRetrievalTask";
import { constructPrompt } from "./constructPrompt";
import parseConstructFn from "./parseConstructFn";
import { type JsonObject } from "type-fest";
import hashPrompt from "./hashPrompt";
export const generateNewCell = async (variantId: string, scenarioId: string) => {
const variant = await prisma.promptVariant.findUnique({
@@ -19,10 +20,6 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
if (!variant || !scenario) return null;
const prompt = await constructPrompt(variant, scenario.variableValues);
const inputHash = crypto.createHash("sha256").update(JSON.stringify(prompt)).digest("hex");
let cell = await prisma.scenarioVariantCell.findUnique({
where: {
promptVariantId_testScenarioId: {
@@ -37,10 +34,29 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
if (cell) return cell;
const parsedConstructFn = await parseConstructFn(
variant.constructFn,
scenario.variableValues as JsonObject,
);
if ("error" in parsedConstructFn) {
return await prisma.scenarioVariantCell.create({
data: {
promptVariantId: variantId,
testScenarioId: scenarioId,
statusCode: 400,
errorMessage: parsedConstructFn.error,
},
});
}
const inputHash = hashPrompt(parsedConstructFn);
cell = await prisma.scenarioVariantCell.create({
data: {
promptVariantId: variantId,
testScenarioId: scenarioId,
prompt: parsedConstructFn.modelInput as unknown as Prisma.InputJsonValue,
},
include: {
modelOutput: true,
@@ -48,9 +64,7 @@ export const generateNewCell = async (variantId: string, scenarioId: string) =>
});
const matchingModelOutput = await prisma.modelOutput.findFirst({
where: {
inputHash,
},
where: { inputHash },
});
let newModelOutput;

View File

@@ -7,7 +7,7 @@ import { type SupportedModel, type OpenAIChatModel } from "../types";
import { env } from "~/env.mjs";
import { countOpenAIChatTokens } from "~/utils/countTokens";
import { rateLimitErrorMessage } from "~/sharedStrings";
import { modelStats } from "../modelStats";
import { modelStats } from "../../modelProviders/modelStats";
export type CompletionResponse = {
output: ChatCompletion | null;

View File

@@ -1,7 +1,6 @@
import { OpenAIChatModel, type SupportedModel } from "../types";
import openAIChatApiShape from "~/codegen/openai.types.ts.txt";
import { type SupportedModel } from "../types";
export const getApiShapeForModel = (model: SupportedModel) => {
if (model in OpenAIChatModel) return openAIChatApiShape;
// if (model in OpenAIChatModel) return openAIChatApiShape;
return "";
};

View File

@@ -0,0 +1,37 @@
import crypto from "crypto";
import { type JsonValue } from "type-fest";
import { type ParsedConstructFn } from "./parseConstructFn";
function sortKeys(obj: JsonValue): JsonValue {
if (typeof obj !== "object" || obj === null) {
// Not an object or array, return as is
return obj;
}
if (Array.isArray(obj)) {
return obj.map(sortKeys);
}
// Get keys and sort them
const keys = Object.keys(obj).sort();
const sortedObj = {};
for (const key of keys) {
// @ts-expect-error not worth fixing types
// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
sortedObj[key] = sortKeys(obj[key]);
}
return sortedObj;
}
export default function hashPrompt(prompt: ParsedConstructFn<any>): string {
// Sort object keys recursively
const sortedObj = sortKeys(prompt as unknown as JsonValue);
// Convert to JSON and hash it
const str = JSON.stringify(sortedObj);
const hash = crypto.createHash("sha256");
hash.update(str);
return hash.digest("hex");
}
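
The point of `sortKeys` is that two prompts differing only in key order hash identically; a quick check (values are illustrative):

import hashPrompt from "./hashPrompt"; // relative path assumed

const a = hashPrompt({
  modelProvider: "openai/ChatCompletion",
  model: "gpt-4-0613",
  modelInput: { model: "gpt-4-0613", stream: false },
} as any);
const b = hashPrompt({
  modelInput: { stream: false, model: "gpt-4-0613" },
  model: "gpt-4-0613",
  modelProvider: "openai/ChatCompletion",
} as any);
console.assert(a === b, "hash should be independent of key order");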

View File

@@ -0,0 +1,45 @@
import { expect, test } from "vitest";
import parseConstructFn from "./parseConstructFn";
import assert from "assert";
// Note: this has to be run with `vitest --no-threads` option or else
// isolated-vm seems to throw errors
test("parseConstructFn", async () => {
const constructed = await parseConstructFn(
`
// These sometimes have a comment
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: \`What is the capital of \${scenario.country}?\`
}
]
})
`,
{ country: "Bolivia" },
);
expect(constructed).toEqual({
modelProvider: "openai/ChatCompletion",
model: "gpt-3.5-turbo-0613",
modelInput: {
messages: [
{
content: "What is the capital of Bolivia?",
role: "user",
},
],
model: "gpt-3.5-turbo-0613",
},
});
});
test("bad syntax", async () => {
const parsed = await parseConstructFn(`definePrompt("openai/ChatCompletion", {`);
assert("error" in parsed);
expect(parsed.error).toContain("Unexpected end of input");
});

View File

@@ -0,0 +1,92 @@
import modelProviders from "~/modelProviders";
import ivm from "isolated-vm";
import { isObject, isString } from "lodash-es";
import { type JsonObject } from "type-fest";
import { validate } from "jsonschema";
export type ParsedConstructFn<T extends keyof typeof modelProviders> = {
modelProvider: T;
model: keyof (typeof modelProviders)[T]["models"];
modelInput: Parameters<(typeof modelProviders)[T]["getModel"]>[0];
};
const isolate = new ivm.Isolate({ memoryLimit: 128 });
export default async function parseConstructFn(
constructFn: string,
scenario: JsonObject | undefined = {},
): Promise<ParsedConstructFn<keyof typeof modelProviders> | { error: string }> {
try {
const modifiedConstructFn = constructFn.replace(
"definePrompt(",
"global.prompt = definePrompt(",
);
const code = `
const scenario = ${JSON.stringify(scenario ?? {}, null, 2)};
const definePrompt = (modelProvider, input) => ({
modelProvider,
input
})
${modifiedConstructFn}
`;
const context = await isolate.createContext();
const jail = context.global;
await jail.set("global", jail.derefInto());
const script = await isolate.compileScript(code);
await script.run(context);
const promptReference = (await context.global.get("prompt")) as ivm.Reference;
const prompt = await promptReference.copy();
if (!isObject(prompt)) {
return { error: "definePrompt did not return an object" };
}
if (!("modelProvider" in prompt) || !isString(prompt.modelProvider)) {
return { error: "definePrompt did not return a valid modelProvider" };
}
const provider =
prompt.modelProvider in modelProviders &&
modelProviders[prompt.modelProvider as keyof typeof modelProviders];
if (!provider) {
return { error: "definePrompt did not return a known modelProvider" };
}
if (!("input" in prompt) || !isObject(prompt.input)) {
return { error: "definePrompt did not return an input" };
}
const validationResult = validate(prompt.input, provider.inputSchema);
if (!validationResult.valid)
return {
error: `definePrompt did not return a valid input: ${validationResult.errors
.map((e) => e.stack)
.join(", ")}`,
};
// We've validated the JSON schema so this should be safe
const input = prompt.input as Parameters<(typeof provider)["getModel"]>[0];
const model = provider.getModel(input);
if (!model) {
return {
error: `definePrompt did not return a known model for the provider ${prompt.modelProvider}`,
};
}
return {
modelProvider: prompt.modelProvider as keyof typeof modelProviders,
model,
modelInput: input,
};
} catch (e) {
const msg =
isObject(e) && "message" in e && isString(e.message)
? e.message
: "unknown error parsing definePrompt script";
return { error: msg };
}
}
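
Beyond the syntax-error case covered in the test above, schema violations surface through the same error channel; for example (behavior inferred from the `validate` branch):

const parsed = await parseConstructFn(
  `definePrompt("openai/ChatCompletion", { messages: "not-an-array" })`,
);
// -> { error: "definePrompt did not return a valid input: ..." }
// since `model` is missing and `messages` must be an array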

View File

@@ -1,7 +0,0 @@
import { isObject } from "lodash-es";
import { type JSONSerializable } from "../types";
export const shouldStream = (config: JSONSerializable): boolean => {
const shouldStream = isObject(config) && "stream" in config && config.stream === true;
return shouldStream;
};

View File

@@ -1,7 +1,6 @@
import { type RouterOutputs } from "~/utils/api";
import { type SliceCreator } from "./store";
import loader from "@monaco-editor/loader";
import openAITypes from "~/codegen/openai.types.ts.txt";
import formatPromptConstructor from "~/utils/formatPromptConstructor";
export const editorBackground = "#fafafa";
@@ -20,7 +19,10 @@ export const createVariantEditorSlice: SliceCreator<SharedVariantEditorSlice> =
// We only want to run this client-side
if (typeof window === "undefined") return;
const monaco = await loader.init();
const [monaco, promptTypes] = await Promise.all([
loader.init(),
get().api?.client.experiments.promptTypes.query(),
]);
monaco.editor.defineTheme("customTheme", {
base: "vs",
@@ -37,14 +39,9 @@ export const createVariantEditorSlice: SliceCreator<SharedVariantEditorSlice> =
lib: ["esnext"],
});
monaco.editor.createModel(
`
${openAITypes}
declare var prompt: components["schemas"]["CreateChatCompletionRequest"];
`,
"typescript",
monaco.Uri.parse("file:///openai.types.ts"),
monaco.languages.typescript.typescriptDefaults.addExtraLib(
promptTypes ?? "",
"file:///PromptTypes.d.ts",
);
monaco.languages.registerDocumentFormattingEditProvider("typescript", {
@@ -64,7 +61,6 @@ export const createVariantEditorSlice: SliceCreator<SharedVariantEditorSlice> =
get().sharedVariantEditor.updateScenariosModel();
},
scenarios: [],
// scenariosModel: null,
setScenarios: (scenarios) => {
set((state) => {
state.sharedVariantEditor.scenarios = scenarios;

View File

@@ -5,11 +5,14 @@ import {
type SharedVariantEditorSlice,
createVariantEditorSlice,
} from "./sharedVariantEditor.slice";
import { type APIClient } from "~/utils/api";
export type State = {
drawerOpen: boolean;
openDrawer: () => void;
closeDrawer: () => void;
api: APIClient | null;
setApi: (api: APIClient) => void;
sharedVariantEditor: SharedVariantEditorSlice;
};
@@ -20,6 +23,12 @@ export type GetFn = Parameters<SliceCreator<unknown>>[1];
const useBaseStore = create<State, [["zustand/immer", never]]>(
immer((set, get, ...rest) => ({
api: null,
setApi: (api) =>
set((state) => {
state.api = api;
}),
drawerOpen: false,
openDrawer: () =>
set((state) => {
@@ -34,5 +43,3 @@ const useBaseStore = create<State, [["zustand/immer", never]]>(
);
export const useAppStore = createSelectors(useBaseStore);
useAppStore.getState().sharedVariantEditor.loadMonaco().catch(console.error);

View File

@@ -15,3 +15,16 @@ export function useSyncVariantEditor() {
}
}, [scenarios.data]);
}
export function SyncAppStore() {
const utils = api.useContext();
const setApi = useAppStore((state) => state.setApi);
useEffect(() => {
console.log("setting api", utils);
setApi(utils);
}, [utils, setApi]);
return null;
}

View File

@@ -65,3 +65,5 @@ export type RouterInputs = inferRouterInputs<AppRouter>;
* @example type HelloOutput = RouterOutputs['example']['hello']
*/
export type RouterOutputs = inferRouterOutputs<AppRouter>;
export type APIClient = ReturnType<typeof api.useContext>;

View File

@@ -16,7 +16,7 @@ export function stripTypes(tsCode: string): string {
const result = babel.transform(tsCode, options);
return result.code ?? tsCode;
} catch (error) {
console.error("Error stripping types", error);
// console.error("Error stripping types", error);
return tsCode;
}
}

vitest.config.ts Normal file
View File

@@ -0,0 +1,11 @@
import tsconfigPaths from "vite-tsconfig-paths";
import { configDefaults, defineConfig, type UserConfig } from "vitest/config";
const config = defineConfig({
test: {
...configDefaults, // Extending Vitest's default options
},
plugins: [tsconfigPaths()],
}) as UserConfig;
export default config;