Compare commits


16 Commits

Author SHA1 Message Date
Kyle Corbitt
156f248c3a add created_at and updated_at to users 2023-08-01 16:43:35 -07:00
Kyle Corbitt
65a76cddc5 world champs signup
Basic landing page to sign up for the "world champs"
2023-08-01 13:07:14 -07:00
David Corbitt
c88266bcd4 Add twitter card 2023-08-01 12:47:05 -07:00
David Corbitt
1bf9554eca Comment out getServerSideProps in [id].tsx 2023-08-01 01:29:28 -07:00
arcticfly
1fb428ef4a Add scenario editing modal, twitter sentiment seeding (#101)
* testing agi-eval benchmark

* Add scenario modal editor

* Add initial values to ScenarioEditorModal

* Add seedTwitterSentiment.ts

---------

Co-authored-by: Kyle Corbitt <kyle@corbt.com>
2023-08-01 01:26:43 -07:00
Kyle Corbitt
6316eaae6d dummy key at build time 2023-07-31 18:03:51 -07:00
Kyle Corbitt
8513924ea5 give the openai lib a dummy default value to try to fix the build 2023-07-31 17:39:45 -07:00
arcticfly
51d64baae9 Add og image for experiments (#100)
* Add og image for experiments

* Update meta tags

* Fix prettier

* Add key to meta tags

* Remove ngrok from og:image
2023-07-31 16:50:15 -07:00
arcticfly
26b6fa4f0c Requeue rate-limited query model tasks (#99)
* Continue polling stats until all evals complete

* Return evaluation changes early, before it has run

* Add task for running new eval

* requeue rate-limited tasks

* Fix prettier
2023-07-26 16:30:50 -07:00
Kyle Corbitt
807665fdc1 Merge pull request #98 from OpenPipe/anthropic
Add Anthropic provider
2023-07-26 15:44:39 -07:00
Kyle Corbitt
d6597d2c8a Add Anthropic provider
Adds support for Claude 1 and 2 in experiments! 🎉
2023-07-26 15:37:27 -07:00
arcticfly
566d67bf48 Change prompt variant section background color (#97)
* Change backgroundColor of variant editor section

* Move refresh control out of scrollable area
2023-07-26 10:51:23 -07:00
arcticfly
d4fb8b689a Ensure evals run properly (#96)
* Run evals against llama output

* Continue polling in OutputCell until evals complete

* Remove unnecessary check
2023-07-25 20:01:58 -07:00
arcticfly
98b231c8bd Store multiple ModelResponses (#95)
* Store multiple ModelResponses

* Fix prettier

* Add CellContent container
2023-07-25 18:54:38 -07:00
Kyle Corbitt
45afb1f1f4 Merge pull request #94 from OpenPipe/scenario-shape
updated replicate llama2 models
2023-07-25 17:40:05 -07:00
arcticfly
223b990005 Change experiment button styling (#93)
* Change button styling

* Fix prettier
2023-07-25 11:41:02 -07:00
68 changed files with 2162 additions and 434 deletions


@@ -37,6 +37,7 @@ const config = {
"warn",
{ vars: "all", varsIgnorePattern: "^_", args: "after-used", argsIgnorePattern: "^_" },
],
"react/no-unescaped-entities": "off",
},
};


@@ -1,2 +1,2 @@
src/codegen/openai.schema.json
*.schema.json
pnpm-lock.yaml


@@ -13,10 +13,13 @@ declare module "nextjs-routes" {
export type Route =
| StaticRoute<"/account/signin">
| DynamicRoute<"/api/auth/[...nextauth]", { "nextauth": string[] }>
| StaticRoute<"/api/experiments/og-image">
| DynamicRoute<"/api/trpc/[trpc]", { "trpc": string }>
| DynamicRoute<"/experiments/[id]", { "id": string }>
| StaticRoute<"/experiments">
| StaticRoute<"/">;
| StaticRoute<"/">
| StaticRoute<"/world-champs">
| StaticRoute<"/world-champs/signup">;
interface StaticRoute<Pathname> {
pathname: Pathname;
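
The routes diff above registers the new `/api/experiments/og-image` endpoint. The handler itself isn't shown in this excerpt; below is a minimal sketch of how such a route typically looks with the `@vercel/og` dependency added later in package.json — the query parameter name and styling are assumptions, not the repo's implementation:

```tsx
// pages/api/experiments/og-image.tsx — illustrative sketch only
import { ImageResponse } from "@vercel/og";
import { type NextRequest } from "next/server";

export const config = { runtime: "edge" }; // @vercel/og renders on the edge runtime

export default function handler(req: NextRequest) {
  // Assumed parameter: the experiment name to draw on the card
  const name = new URL(req.url).searchParams.get("name") ?? "Experiment";
  return new ImageResponse(
    (
      <div
        style={{
          display: "flex",
          width: "100%",
          height: "100%",
          alignItems: "center",
          justifyContent: "center",
          fontSize: 60,
          background: "white",
        }}
      >
        {name}
      </div>
    ),
    { width: 1200, height: 630 }, // standard og:image dimensions
  );
}
```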


@@ -45,6 +45,7 @@ Natively supports [OpenAI function calls](https://openai.com/blog/function-calli
- All models available through the OpenAI [chat completion API](https://platform.openai.com/docs/guides/gpt/chat-completions-api)
- Llama2 [7b chat](https://replicate.com/a16z-infra/llama7b-v2-chat), [13b chat](https://replicate.com/a16z-infra/llama13b-v2-chat), [70b chat](https://replicate.com/replicate/llama70b-v2-chat).
- Anthropic's [Claude 1 Instant](https://www.anthropic.com/index/introducing-claude) and [Claude 2](https://www.anthropic.com/index/claude-2)
## Running Locally


@@ -21,6 +21,7 @@
"check": "concurrently 'pnpm lint' 'pnpm tsc' 'pnpm prettier . --check'"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.5.8",
"@apidevtools/json-schema-ref-parser": "^10.1.0",
"@babel/preset-typescript": "^7.22.5",
"@babel/standalone": "^7.22.9",
@@ -40,6 +41,7 @@
"@trpc/next": "^10.26.0",
"@trpc/react-query": "^10.26.0",
"@trpc/server": "^10.26.0",
"@vercel/og": "^0.5.9",
"ast-types": "^0.14.2",
"chroma-js": "^2.4.2",
"concurrently": "^8.2.0",
@@ -105,6 +107,7 @@
"@types/uuid": "^9.0.2",
"@typescript-eslint/eslint-plugin": "^5.59.6",
"@typescript-eslint/parser": "^5.59.6",
"csv-parse": "^5.4.0",
"eslint": "^8.40.0",
"eslint-config-next": "^13.4.2",
"eslint-plugin-unused-imports": "^2.0.0",

pnpm-lock.yaml generated

@@ -1,10 +1,13 @@
lockfileVersion: '6.0'
lockfileVersion: '6.1'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
'@anthropic-ai/sdk':
specifier: ^0.5.8
version: 0.5.8
'@apidevtools/json-schema-ref-parser':
specifier: ^10.1.0
version: 10.1.0
@@ -62,6 +65,9 @@ dependencies:
'@trpc/server':
specifier: ^10.26.0
version: 10.26.0
'@vercel/og':
specifier: ^0.5.9
version: 0.5.9
ast-types:
specifier: ^0.14.2
version: 0.14.2
@@ -253,6 +259,9 @@ devDependencies:
'@typescript-eslint/parser':
specifier: ^5.59.6
version: 5.59.6(eslint@8.40.0)(typescript@5.0.4)
csv-parse:
specifier: ^5.4.0
version: 5.4.0
eslint:
specifier: ^8.40.0
version: 8.40.0
@@ -298,6 +307,22 @@ packages:
'@jridgewell/gen-mapping': 0.3.3
'@jridgewell/trace-mapping': 0.3.18
/@anthropic-ai/sdk@0.5.8:
resolution: {integrity: sha512-iHenjcE2Q/az6VZiP1DueOSvKNRmxsly6Rx2yjJBoy7OBYVFGVjEdgs2mPQHtTX0ibKAR7tPq6F6MQbKDPWcKg==}
dependencies:
'@types/node': 18.16.0
'@types/node-fetch': 2.6.4
abort-controller: 3.0.0
agentkeepalive: 4.3.0
digest-fetch: 1.3.0
form-data-encoder: 1.7.2
formdata-node: 4.4.1
node-fetch: 2.6.12
transitivePeerDependencies:
- encoding
- supports-color
dev: false
/@apidevtools/json-schema-ref-parser@10.1.0:
resolution: {integrity: sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==}
engines: {node: '>= 16'}
@@ -2631,10 +2656,24 @@ packages:
resolution: {integrity: sha512-PDNlhP/1vyTgmNyiucGqGCdXIp7HIkkvKO50si3y3PcceeHvqtiKPaH1iJdz63jCWMVMbj2MElSxXPOeBvEVIQ==}
requiresBuild: true
/@resvg/resvg-wasm@2.4.1:
resolution: {integrity: sha512-yi6R0HyHtsoWTRA06Col4WoDs7SvlXU3DLMNP2bdAgs7HK18dTEVl1weXgxRzi8gwLteGUbIg29zulxIB3GSdg==}
engines: {node: '>= 10'}
dev: false
/@rushstack/eslint-patch@1.3.2:
resolution: {integrity: sha512-V+MvGwaHH03hYhY+k6Ef/xKd6RYlc4q8WBx+2ANmipHJcKuktNcI/NgEsJgdSUF6Lw32njT6OnrRsKYCdgHjYw==}
dev: true
/@shuding/opentype.js@1.4.0-beta.0:
resolution: {integrity: sha512-3NgmNyH3l/Hv6EvsWJbsvpcpUba6R8IREQ83nH83cyakCw7uM1arZKNfHwv1Wz6jgqrF/j4x5ELvR6PnK9nTcA==}
engines: {node: '>= 8.0.0'}
hasBin: true
dependencies:
fflate: 0.7.4
string.prototype.codepointat: 0.2.1
dev: false
/@sinclair/typebox@0.27.8:
resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==}
dev: true
@@ -3164,6 +3203,15 @@ packages:
eslint-visitor-keys: 3.4.1
dev: true
/@vercel/og@0.5.9:
resolution: {integrity: sha512-CtjaV/BVHtNCjRtxGqn8Q6AKFLqcG34Byxr91+mY+4eqyp/09LVe9jEeY9WXjbaKvu8syWPMteTpY+YQUQYzSg==}
engines: {node: '>=16'}
dependencies:
'@resvg/resvg-wasm': 2.4.1
satori: 0.10.1
yoga-wasm-web: 0.3.3
dev: false
/@vitest/expect@0.33.0:
resolution: {integrity: sha512-sVNf+Gla3mhTCxNJx+wJLDPp/WcstOe0Ksqz4Vec51MmgMth/ia0MGFEkIZmVGeTL5HtjYR4Wl/ZxBxBXZJTzQ==}
dependencies:
@@ -3610,6 +3658,11 @@ packages:
resolution: {integrity: sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==}
dev: false
/base64-js@0.0.8:
resolution: {integrity: sha512-3XSA2cR/h/73EzlXXdU6YNycmYI7+kicTxks4eJg2g39biHR84slg2+des+p7iHYhbRg/udIS4TD53WabcOUkw==}
engines: {node: '>= 0.4'}
dev: false
/base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
dev: false
@@ -3731,6 +3784,10 @@ packages:
resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
engines: {node: '>=6'}
/camelize@1.0.1:
resolution: {integrity: sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ==}
dev: false
/caniuse-lite@1.0.30001517:
resolution: {integrity: sha512-Vdhm5S11DaFVLlyiKu4hiUTkpZu+y1KA/rZZqVQfOD5YdDT/eQKlkt7NaE0WGOFgX32diqt9MiP9CAiFeRklaA==}
@@ -3999,12 +4056,33 @@ packages:
resolution: {integrity: sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==}
dev: false
/css-background-parser@0.1.0:
resolution: {integrity: sha512-2EZLisiZQ+7m4wwur/qiYJRniHX4K5Tc9w93MT3AS0WS1u5kaZ4FKXlOTBhOjc+CgEgPiGY+fX1yWD8UwpEqUA==}
dev: false
/css-box-model@1.2.1:
resolution: {integrity: sha512-a7Vr4Q/kd/aw96bnJG332W9V9LkJO69JRcaCYDUqjp6/z0w6VcZjgAcTbgFxEPfBgdnAwlh3iwu+hLopa+flJw==}
dependencies:
tiny-invariant: 1.3.1
dev: false
/css-box-shadow@1.0.0-3:
resolution: {integrity: sha512-9jaqR6e7Ohds+aWwmhe6wILJ99xYQbfmK9QQB9CcMjDbTxPZjwEmUQpU91OG05Xgm8BahT5fW+svbsQGjS/zPg==}
dev: false
/css-color-keywords@1.0.0:
resolution: {integrity: sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==}
engines: {node: '>=4'}
dev: false
/css-to-react-native@3.2.0:
resolution: {integrity: sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ==}
dependencies:
camelize: 1.0.1
css-color-keywords: 1.0.0
postcss-value-parser: 4.2.0
dev: false
/csstype@2.6.21:
resolution: {integrity: sha512-Z1PhmomIfypOpoMjRQB70jfvy/wxT50qW08YXO5lMIJkrdq4yOTR+AW7FqutScmB9NkLwxo+jU+kZLbofZZq/w==}
dev: false
@@ -4012,6 +4090,10 @@ packages:
/csstype@3.1.2:
resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==}
/csv-parse@5.4.0:
resolution: {integrity: sha512-JiQosUWiOFgp4hQn0an+SBoV9IKdqzhROM0iiN4LB7UpfJBlsSJlWl9nq4zGgxgMAzHJ6V4t29VAVD+3+2NJAg==}
dev: true
/d@1.0.1:
resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==}
dependencies:
@@ -4205,6 +4287,10 @@ packages:
/electron-to-chromium@1.4.465:
resolution: {integrity: sha512-XQcuHvEJRMU97UJ75e170mgcITZoz0lIyiaVjk6R+NMTJ8KBIvUHYd1779swgOppUlzxR+JsLpq59PumaXS1jQ==}
/emoji-regex@10.2.1:
resolution: {integrity: sha512-97g6QgOk8zlDRdgq1WxwgTMgEWGVAQvB5Fdpgc1MkNy56la5SKP9GsMXKDOdqwn90/41a8yPwIGk1Y6WVbeMQA==}
dev: false
/emoji-regex@8.0.0:
resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
dev: false
@@ -4920,6 +5006,10 @@ packages:
resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==}
dev: false
/fflate@0.7.4:
resolution: {integrity: sha512-5u2V/CDW15QM1XbbgS+0DfPxVB+jUKhWEKuuFuHncbk3tEEqzmoXL+2KyOFuKGqOnmdIy0/davWF1CkuwtibCw==}
dev: false
/file-entry-cache@6.0.1:
resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
engines: {node: ^10.12.0 || >=12.0.0}
@@ -5305,6 +5395,11 @@ packages:
space-separated-tokens: 1.1.5
dev: false
/hex-rgb@4.3.0:
resolution: {integrity: sha512-Ox1pJVrDCyGHMG9CFg1tmrRUMRPRsAWYc/PinY0XzJU4K7y7vjNoLKIQ7BR5UJMCxNN8EM1MNDmHWA/B3aZUuw==}
engines: {node: '>=6'}
dev: false
/highlight.js@10.7.3:
resolution: {integrity: sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==}
dev: false
@@ -5770,6 +5865,13 @@ packages:
type-check: 0.4.0
dev: true
/linebreak@1.1.0:
resolution: {integrity: sha512-MHp03UImeVhB7XZtjd0E4n6+3xr5Dq/9xI/5FptGk5FrbDR3zagPa2DS6U8ks/3HjbKWG9Q1M2ufOzxV2qLYSQ==}
dependencies:
base64-js: 0.0.8
unicode-trie: 2.0.0
dev: false
/lines-and-columns@1.2.4:
resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
dev: false
@@ -6374,12 +6476,23 @@ packages:
resolution: {integrity: sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==}
dev: false
/pako@0.2.9:
resolution: {integrity: sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==}
dev: false
/parent-module@1.0.1:
resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
engines: {node: '>=6'}
dependencies:
callsites: 3.1.0
/parse-css-color@0.2.1:
resolution: {integrity: sha512-bwS/GGIFV3b6KS4uwpzCFj4w297Yl3uqnSgIPsoQkx7GMLROXfMnWvxfNkL0oh8HVhZA4hvJoEoEIqonfJ3BWg==}
dependencies:
color-name: 1.1.4
hex-rgb: 4.3.0
dev: false
/parse-entities@2.0.0:
resolution: {integrity: sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==}
dependencies:
@@ -6546,6 +6659,10 @@ packages:
engines: {node: '>=4'}
dev: false
/postcss-value-parser@4.2.0:
resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
dev: false
/postcss@8.4.14:
resolution: {integrity: sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==}
engines: {node: ^10 || ^12 || >=14}
@@ -7122,6 +7239,22 @@ packages:
resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==}
dev: false
/satori@0.10.1:
resolution: {integrity: sha512-F4bTCkDp931tLb7+UCNPBuSQwXhikrUkI4fBQo6fA8lF0Evqqgg3nDyUpRktQpR5Ry1DIiIVqLyEwkAms87ykg==}
engines: {node: '>=16'}
dependencies:
'@shuding/opentype.js': 1.4.0-beta.0
css-background-parser: 0.1.0
css-box-shadow: 1.0.0-3
css-to-react-native: 3.2.0
emoji-regex: 10.2.1
escape-html: 1.0.3
linebreak: 1.1.0
parse-css-color: 0.2.1
postcss-value-parser: 4.2.0
yoga-wasm-web: 0.3.3
dev: false
/scheduler@0.23.0:
resolution: {integrity: sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==}
dependencies:
@@ -7349,6 +7482,10 @@ packages:
strip-ansi: 6.0.1
dev: false
/string.prototype.codepointat@0.2.1:
resolution: {integrity: sha512-2cBVCj6I4IOvEnjgO/hWqXjqBGsY+zwPmHl12Srk9IXSZ56Jwwmy+66XO5Iut/oQVR7t5ihYdLB0GMa4alEUcg==}
dev: false
/string.prototype.matchall@4.0.8:
resolution: {integrity: sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==}
dependencies:
@@ -7575,6 +7712,10 @@ packages:
globrex: 0.1.2
dev: true
/tiny-inflate@1.0.3:
resolution: {integrity: sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw==}
dev: false
/tiny-invariant@1.3.1:
resolution: {integrity: sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==}
dev: false
@@ -7781,6 +7922,13 @@ packages:
busboy: 1.6.0
dev: true
/unicode-trie@2.0.0:
resolution: {integrity: sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ==}
dependencies:
pako: 0.2.9
tiny-inflate: 1.0.3
dev: false
/unpipe@1.0.0:
resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==}
engines: {node: '>= 0.8'}
@@ -8286,6 +8434,10 @@ packages:
engines: {node: '>=12.20'}
dev: true
/yoga-wasm-web@0.3.3:
resolution: {integrity: sha512-N+d4UJSJbt/R3wqY7Coqs5pcV0aUj2j9IaQ3rNj9bVCLld8tTGKRa2USARjnvZJWVx1NDmQev8EknoczaOQDOA==}
dev: false
/zod@3.21.4:
resolution: {integrity: sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==}
dev: false


@@ -0,0 +1,84 @@
Text,sentiment,emotion
@dell your customer service is horrible especially agent syedfaisal who has made this experience of purchasing a new computer downright awful and Ill reconsider ever buying a Dell in the future @DellTech,negative,anger
@zacokalo @Dell @DellCares @Dell give the man what he paid for!,neutral,anger
"COOKING STREAM DAY!!! Ty to @Alienware for sponsoring this stream! Ill be making a bunch of Japanese Alien themed foods hehe
Come check it out! https://t.co/m06tJQ06zk
#alienwarepartner #intelgaming @Dell @IntelGaming https://t.co/qOdQX2E8VD",positive,joy
@emijuju_ @Alienware @Dell @intel Beautiful 😍❤️😻,positive,joy
"What's your biggest data management challenge? • Cloud complexity? • Lengthy tech refresh cycles? • Capital budget constraints? Solve your challenges with as-a-Storage. Get simplicity, agility &amp; control with @Dell #APEX. https://t.co/mCblMtH931 https://t.co/eepKNZ4Ai3",neutral,optimism
"This week we were at the ""Top Gun"" themed @Dell Product Expo. Eddie Muñoz met Maverick look-alike, California Tom Cruise (Jerome LeBlanc)!
""I feel the need, the need for speed."" - Maverick
#topgun #topgunmaverick #dell #delltechnologies #lockncharge https://t.co/QHYH2EbMjq",positive,joy
"Itsss been more than a week...i m following up with dell for troubleshootings...my https://t.co/lWhg2YKhQa suffering so as my hard earned money...hightly disappointed...contd..
@DellCares @Dell",negative,sadness
"@ashu_k7 @Dell Pathetic!!!!! I Dont mind taking legal action, this is deficency of service for which the customer is nt getting help..",negative,anger
@ashu_k7 @Dell Making life unhappy is the new tag line of #Dell,negative,sadness
"@Dell If you are buying a Dell, make sure you are making your life hell.
Better buy other laptops. If you wanted to opt for Dell better opt for garbage on the streets.",negative,anger
"MY DESK'S FINAL FORM? Seriously, I'm finally happy with my monitor setup here... and I'll keep this setup whenever I move... FOREVER. What do you think?
https://t.co/WJZ2JXtOnX
@Alienware @Dell cheers. https://t.co/6Whhldfpv0",positive,joy
"@Dell Dell Alienware computer has had software problems with SupportAssist since purchase. Dell, despite paying for Premium Support, has never fixed issues. Latest solution was to erase everything and reload....SupportAssist still doesn't work.",negative,anger
"HUGE congratulations to Startup Battle 3.0 winner ➡️ @Ox_Fulfillment x @cyborgcharu for being featured in @BusinessInsider &amp; @Dell showcasing the journey at Ox! 🚀🚀🚀
We love to see our portfolio companies continuing to BUILD SOMETHING FROM NOTHING! 🔥 https://t.co/awBkn5ippB",positive,joy
@Dell happy Friday!,positive,joy
"@intel Core i5 1135G7 - 4732 points
@intel Core i5 1235 - 6619 points
@Dell Latitude 5420 x 5430.
Cinebench R23. Good job Intel!",positive,joy
@Dell india we purchased 52 docking station and we have around 100 users using dell laptop as well as dell monitor now they are refusing to replace my faulty product and disconnecting my every call....,negative,anger
"It's another year ans another day But cant fill it in yet the child hood dreams.
It's my birthdy today. Can anyone of you guys bless me with a simplest gaming oc that can run
@DOTA2 ?
@Dell @HP @VastGG @Acer @Alienware @Lenovo @toshiba @IBM @Fujitsu_Global @NEC https://t.co/69G8tL9sN8",neutral,joy
"@idoccor @Dell That's always the decision—wait, or, look elsewhere. In this case, I think I unfortunately need to wait since there are only two monitors with these specs and I don't like the other one 😂",negative,sadness
"@MichaelDell @Dell @DellCares For how long this will continue. It is high time you either fix the problem for good or replace the complete laptop. Spent over 60+ hours with Customer Care teams, which is not helping. Cannot keep going on like this.",negative,anger
"@Dell @DellCares but no, not really",neutral,sadness
"Business innovation requires insight, agility and efficiency. How do you get there? RP PRO, LLC recommends starting by proactively managing IT infrastructure with #OpenManage Systems from @Dell. https://t.co/fBcK1lfFMu https://t.co/xWHLkkHCjn",neutral,optimism
@Dell Yessirrrrr #NationalCoffeeDay,positive,joy
"New blog post from @Dell shared on https://t.co/EgfPChB8AT
Re-routing Our Connected and Autonomous Future https://t.co/AW8EHQrbd6
#future #futuretech #techinnovation https://t.co/koX8stKPsr",neutral,joy
"In a free-market economy, the folks @IronMountain can set prices as they see fit. Their customers are also free to find better prices at competitors like @Dell
@H3CGlobal @HPE
https://t.co/reZ56DNTBI",neutral,optimism
"Delighted to chat with many of our partners here in person at @Intel Innovation! @Dell, @Lenovo, @Supermicro_SMCI, @QuantaQCT #IntelON https://t.co/BxIeGW8deN",positive,joy
"A special gracias to our Startup Chica San Antonio 2022 sponsors @eBay, @jcpenney, @Barbie, @HEB, @Dell, @Honda, @SouthsideSATX💜✨ https://t.co/lZ6WWkziHl",positive,joy
"When your team decides to start supporting developers, your #ops must change too. More from @cote and @Dell Developer Community Manager @barton808: https://t.co/W6f1oMiTgV",neutral,optimism
@EmDStowers @LASERGIANT1 @ohwormongod @Ludovician_Vega @Dell our boy snitchin,neutral,anger
A 1st place dmi:Design Value Award goes to @Dell for a packaging modernization initiative that helped them get closer to their corporate Moonshot Sustainability Goal of 100% recycled or renewable packaging by 2030. More at https://t.co/dnhZWWLCQC #designvalue #DVA22,positive,optimism
Reducing deployment and maintenance complexity is the goal behind @dell and @WindRiver's new collaboration. https://t.co/2PxQgPuHUU,positive,optimism
@jaserhunter @Dell Love the sales pitch lol,positive,joy
@Dell india we purchased 52 docking station and we have around 100 users using dell laptop as well as dell monitor now they are refusing to replace my faulty product and disconnecting my every call....,negative,anger
@ashu_k7 @Dell One more example.. their technical support is also worse. https://t.co/20atSgI4fg,negative,anger
*angry screeches about @Dell proprietary MBR windows 8.1 partitions not being able to save as an img in clonezilla *,negative,anger
@socialitebooks @BBYC_Gamers @Dell @Alienware @BestBuyCanada @intelcanada Congratulations!!!,positive,joy
"Thank you to the @dell team for coming out to volunteer today! We truly appreciate your hard work and look forward to seeing you again soon!
If you and your team are interested in helping out at the UMLAUF, visit our website for more information: https://t.co/lVfsZT2ogS https://t.co/eLz0FY0y4M",positive,joy
"@TheCaramelGamer @intel @bravadogaming @Intel_Africa @Dell @DellTech @DellTechMEA @Alienware @IntelUK we love to see it.
Also also actually actually whoever did that artwork? 🔥🔥🔥 am a fan.",positive,joy
"LOVING MY DELL 2 IN 1 LAPTOP
YAYY 🥳🥳
@Dell #DellInspiron #DellLaptop https://t.co/vib96jf3tC",positive,joy
@Azure @OracleItalia @AWS_Italy @lenovoitalia @Dell discussing the future of #HPC during the #hpcroundtable22 in Turin today #highperformancecomputing https://t.co/jJ1WqBulPF,neutral,joy
Attracting talent @AmericanChamber. @marg_cola @Dell speaks of quality of life connectivity and the Opportunity for development being so crucial. Housing availability is now impacting on decision making for potential candidates. #WhyCork,positive,optimism
.@Dell partners with @WindRiver on modular cloud-native telecommunications infrastructure https://t.co/4SWATspwCP @SiliconANGLE @Mike_Wheatley @holgermu @constellationr,neutral,joy
@Dell Not buy Dell Inspiron laptop,neutral,sadness
"@dell #delltechforum reminding us IDC have predicted that by 2024, 50% of everything we consume in technology will be as a service https://t.co/3UBiZJX0LE",neutral,optimism
@RachMurph @HETTShow @Dell Thank you for coming! Great evening,positive,joy
Congratulations to Jason M of Moncton NB on winning a @Dell @Alienware m15 R7 15.6″ gaming laptop from @BestBuyCanada and @intelcanada's gaming days #contest on the blog. Visit https://t.co/VryaY5Rvv9 to learn about tech and for chances to win new tech. https://t.co/T6n0dzF6oL,positive,joy
@MattVisiwig @Dell Sour taste for sure 😶 But don't let ego distract you from what you really want to buy 😁,neutral,optimism
"Massive thank you goes to sponsors @HendersonLoggie @lindsaysnews @Dell @unity, all of our fantastic judges and mentors and the team at @EGX and @ExCeLLondon.
Big congratulations also to all of our other @AbertayDare teams - an amazing year! #Dare2022 https://t.co/jYe4agO7lW",positive,joy
"@timetcetera @rahaug Nah, I just need @Dell to start paying me comissions 😂",neutral,joy
"""Whether youre an engineer, a designer, or work in supply chain management or sales, there are always opportunities to think about sustainability and how you can do things more efficiently."" 👏 — Oliver Campbell, Director of Packaging Engineering, @Dell https://t.co/vUJLTWNFwP https://t.co/GJWAzGfAxJ",positive,optimism
"Hi, my name is @listerepvp and I support @Dell, always.",positive,joy


@@ -0,0 +1,52 @@
-- DropForeignKey
ALTER TABLE "ModelOutput" DROP CONSTRAINT "ModelOutput_scenarioVariantCellId_fkey";
-- DropForeignKey
ALTER TABLE "OutputEvaluation" DROP CONSTRAINT "OutputEvaluation_modelOutputId_fkey";
-- DropIndex
DROP INDEX "OutputEvaluation_modelOutputId_evaluationId_key";
-- AlterTable
ALTER TABLE "OutputEvaluation" RENAME COLUMN "modelOutputId" TO "modelResponseId";
-- AlterTable
ALTER TABLE "ScenarioVariantCell" DROP COLUMN "retryTime",
DROP COLUMN "statusCode",
ADD COLUMN "jobQueuedAt" TIMESTAMP(3),
ADD COLUMN "jobStartedAt" TIMESTAMP(3);
ALTER TABLE "ModelOutput" RENAME TO "ModelResponse";
ALTER TABLE "ModelResponse"
ADD COLUMN "requestedAt" TIMESTAMP(3),
ADD COLUMN "receivedAt" TIMESTAMP(3),
ADD COLUMN "statusCode" INTEGER,
ADD COLUMN "errorMessage" TEXT,
ADD COLUMN "retryTime" TIMESTAMP(3),
ADD COLUMN "outdated" BOOLEAN NOT NULL DEFAULT false;
-- 3. Remove the unnecessary column
ALTER TABLE "ModelResponse"
DROP COLUMN "timeToComplete";
-- AlterTable
ALTER TABLE "ModelResponse" RENAME CONSTRAINT "ModelOutput_pkey" TO "ModelResponse_pkey";
ALTER TABLE "ModelResponse" ALTER COLUMN "output" DROP NOT NULL;
-- DropIndex
DROP INDEX "ModelOutput_scenarioVariantCellId_key";
-- AddForeignKey
ALTER TABLE "ModelResponse" ADD CONSTRAINT "ModelResponse_scenarioVariantCellId_fkey" FOREIGN KEY ("scenarioVariantCellId") REFERENCES "ScenarioVariantCell"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- RenameIndex
ALTER INDEX "ModelOutput_inputHash_idx" RENAME TO "ModelResponse_inputHash_idx";
-- CreateIndex
CREATE UNIQUE INDEX "OutputEvaluation_modelResponseId_evaluationId_key" ON "OutputEvaluation"("modelResponseId", "evaluationId");
-- AddForeignKey
ALTER TABLE "OutputEvaluation" ADD CONSTRAINT "OutputEvaluation_modelResponseId_fkey" FOREIGN KEY ("modelResponseId") REFERENCES "ModelResponse"("id") ON DELETE CASCADE ON UPDATE CASCADE;


@@ -0,0 +1,16 @@
-- CreateTable
CREATE TABLE "WorldChampEntrant" (
"id" UUID NOT NULL,
"userId" UUID NOT NULL,
"approved" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "WorldChampEntrant_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "WorldChampEntrant_userId_key" ON "WorldChampEntrant"("userId");
-- AddForeignKey
ALTER TABLE "WorldChampEntrant" ADD CONSTRAINT "WorldChampEntrant_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;


@@ -0,0 +1,3 @@
-- AlterTable
ALTER TABLE "User" ADD COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
ADD COLUMN "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP;


@@ -90,12 +90,11 @@ enum CellRetrievalStatus {
model ScenarioVariantCell {
id String @id @default(uuid()) @db.Uuid
statusCode Int?
errorMessage String?
retryTime DateTime?
retrievalStatus CellRetrievalStatus @default(COMPLETE)
modelOutput ModelOutput?
jobQueuedAt DateTime?
jobStartedAt DateTime?
modelResponses ModelResponse[]
errorMessage String? // Contains errors that occurred independently of model responses
promptVariantId String @db.Uuid
promptVariant PromptVariant @relation(fields: [promptVariantId], references: [id], onDelete: Cascade)
@@ -110,24 +109,28 @@ model ScenarioVariantCell {
@@unique([promptVariantId, testScenarioId])
}
model ModelOutput {
model ModelResponse {
id String @id @default(uuid()) @db.Uuid
inputHash String
output Json
timeToComplete Int @default(0)
requestedAt DateTime?
receivedAt DateTime?
output Json?
cost Float?
promptTokens Int?
completionTokens Int?
statusCode Int?
errorMessage String?
retryTime DateTime?
outdated Boolean @default(false)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
scenarioVariantCellId String @db.Uuid
scenarioVariantCell ScenarioVariantCell @relation(fields: [scenarioVariantCellId], references: [id], onDelete: Cascade)
outputEvaluations OutputEvaluation[]
outputEvaluations OutputEvaluation[]
@@unique([scenarioVariantCellId])
@@index([inputHash])
}
@@ -147,8 +150,8 @@ model Evaluation {
experimentId String @db.Uuid
experiment Experiment @relation(fields: [experimentId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
outputEvaluations OutputEvaluation[]
}
@@ -159,8 +162,8 @@ model OutputEvaluation {
result Float
details String?
modelOutputId String @db.Uuid
modelOutput ModelOutput @relation(fields: [modelOutputId], references: [id], onDelete: Cascade)
modelResponseId String @db.Uuid
modelResponse ModelResponse @relation(fields: [modelResponseId], references: [id], onDelete: Cascade)
evaluationId String @db.Uuid
evaluation Evaluation @relation(fields: [evaluationId], references: [id], onDelete: Cascade)
@@ -168,7 +171,7 @@ model OutputEvaluation {
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@unique([modelOutputId, evaluationId])
@@unique([modelResponseId, evaluationId])
}
model Organization {
@@ -176,8 +179,8 @@ model Organization {
personalOrgUserId String? @unique @db.Uuid
PersonalOrgUser User? @relation(fields: [personalOrgUserId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
organizationUsers OrganizationUser[]
experiments Experiment[]
}
@@ -205,6 +208,20 @@ model OrganizationUser {
@@unique([organizationId, userId])
}
model WorldChampEntrant {
id String @id @default(uuid()) @db.Uuid
userId String @db.Uuid
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
approved Boolean @default(false)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@unique([userId])
}
model Account {
id String @id @default(uuid()) @db.Uuid
userId String @db.Uuid
@@ -242,6 +259,10 @@ model User {
sessions Session[]
organizationUsers OrganizationUser[]
organizations Organization[]
worldChampEntrant WorldChampEntrant?
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
}
model VerificationToken {

prisma/seedAgiEval.ts Normal file

@@ -0,0 +1,127 @@
import { prisma } from "~/server/db";
import { generateNewCell } from "~/server/utils/generateNewCell";
import dedent from "dedent";
import { execSync } from "child_process";
import fs from "fs";
const defaultId = "11111111-1111-1111-1111-111111111112";
await prisma.organization.deleteMany({
where: { id: defaultId },
});
// If there's an existing org, just seed into it
const org =
(await prisma.organization.findFirst({})) ??
(await prisma.organization.create({
data: { id: defaultId },
}));
// Clone the repo from git@github.com:microsoft/AGIEval.git into a tmp dir if it doesn't exist
const tmpDir = "/tmp/agi-eval";
if (!fs.existsSync(tmpDir)) {
execSync(`git clone git@github.com:microsoft/AGIEval.git ${tmpDir}`);
}
const datasets = [
"sat-en",
"sat-math",
"lsat-rc",
"lsat-ar",
"aqua-rat",
"logiqa-en",
"lsat-lr",
"math",
];
type Scenario = {
passage: string | null;
question: string;
options: string[] | null;
label: string;
};
for (const dataset of datasets) {
const experimentName = `AGI-Eval: ${dataset}`;
const oldExperiment = await prisma.experiment.findFirst({
where: {
label: experimentName,
organizationId: org.id,
},
});
if (oldExperiment) {
await prisma.experiment.deleteMany({
where: { id: oldExperiment.id },
});
}
const experiment = await prisma.experiment.create({
data: {
id: oldExperiment?.id ?? undefined,
label: experimentName,
organizationId: org.id,
},
});
const scenarios: Scenario[] = fs
.readFileSync(`${tmpDir}/data/v1/${dataset}.jsonl`, "utf8")
.split("\n")
.filter((line) => line.length > 0)
.map((line) => JSON.parse(line) as Scenario);
console.log("scenarios", scenarios.length);
await prisma.testScenario.createMany({
data: scenarios.slice(0, 30).map((scenario, i) => ({
experimentId: experiment.id,
sortIndex: i,
variableValues: {
passage: scenario.passage,
question: scenario.question,
options: scenario.options?.join("\n"),
label: scenario.label,
},
})),
});
await prisma.templateVariable.createMany({
data: ["passage", "question", "options", "label"].map((label) => ({
experimentId: experiment.id,
label,
})),
});
await prisma.promptVariant.createMany({
data: [
{
experimentId: experiment.id,
label: "Prompt Variant 1",
sortIndex: 0,
model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 1,
constructFn: dedent`
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: \`Passage: ${"$"}{scenario.passage}\n\nQuestion: ${"$"}{scenario.question}\n\nOptions: ${"$"}{scenario.options}\n\n Respond with just the letter of the best option in the format Answer: (A).\`
}
],
temperature: 0,
})`,
},
],
});
await prisma.evaluation.createMany({
data: [
{
experimentId: experiment.id,
label: "Eval",
evalType: "CONTAINS",
value: "Answer: ({{label}})",
},
],
});
}
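
The seed script registers a CONTAINS evaluation whose value is a template ("Answer: ({{label}})"). A hypothetical illustration of how such a check could be applied — this is a guess, not the repo's actual evaluator:

```ts
// Fill {{var}} placeholders from the scenario's variableValues, then substring-match
// the model output. OutputStats (below) treats result > 0.5 as a pass.
function runContainsEval(
  template: string,
  vars: Record<string, string>,
  output: string,
): number {
  const needle = template.replace(/\{\{(\w+)\}\}/g, (_, key: string) => vars[key] ?? "");
  return output.includes(needle) ? 1 : 0;
}

// runContainsEval("Answer: ({{label}})", { label: "A" }, "... Answer: (A)") === 1
```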


@@ -0,0 +1,113 @@
import { prisma } from "~/server/db";
import dedent from "dedent";
import fs from "fs";
import { parse } from "csv-parse/sync";
const defaultId = "11111111-1111-1111-1111-111111111112";
await prisma.organization.deleteMany({
where: { id: defaultId },
});
// If there's an existing org, just seed into it
const org =
(await prisma.organization.findFirst({})) ??
(await prisma.organization.create({
data: { id: defaultId },
}));
type Scenario = {
text: string;
sentiment: string;
emotion: string;
};
const experimentName = `Twitter Sentiment Analysis`;
const oldExperiment = await prisma.experiment.findFirst({
where: {
label: experimentName,
organizationId: org.id,
},
});
if (oldExperiment) {
await prisma.experiment.deleteMany({
where: { id: oldExperiment.id },
});
}
const experiment = await prisma.experiment.create({
data: {
id: oldExperiment?.id ?? undefined,
label: experimentName,
organizationId: org.id,
},
});
const content = fs.readFileSync("./prisma/datasets/validated_tweets.csv", "utf8");
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const records: any[] = parse(content, { delimiter: ",", from_line: 2 });
console.log("records", records);
const scenarios: Scenario[] = records.map((row) => ({
text: row[0],
sentiment: row[1],
emotion: row[2],
}));
console.log("scenarios", scenarios.length);
await prisma.testScenario.createMany({
data: scenarios.slice(0, 30).map((scenario, i) => ({
experimentId: experiment.id,
sortIndex: i,
variableValues: {
text: scenario.text,
sentiment: scenario.sentiment,
emotion: scenario.emotion,
},
})),
});
await prisma.templateVariable.createMany({
data: ["text", "sentiment", "emotion"].map((label) => ({
experimentId: experiment.id,
label,
})),
});
await prisma.promptVariant.createMany({
data: [
{
experimentId: experiment.id,
label: "Prompt Variant 1",
sortIndex: 0,
model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 1,
constructFn: dedent`
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
messages: [
{
role: "user",
content: \`Text: ${"$"}{scenario.text}\n\nRespond with the sentiment (negative|neutral|positive) and emotion (optimism|joy|anger|sadness) of the tweet in this format: "answer: <sentiment>-<emotion>".\`
}
],
temperature: 0,
})`,
},
],
});
await prisma.evaluation.createMany({
data: [
{
experimentId: experiment.id,
label: "Eval",
evalType: "CONTAINS",
value: "answer: {{sentiment}}-{{emotion}}",
},
],
});

public/og.png: new binary file (image, 62 KiB; binary content not shown)


@@ -36,17 +36,9 @@ export const DeleteButton = () => {
return (
<>
<Button
size="sm"
variant={{ base: "outline", lg: "ghost" }}
colorScheme="red"
fontWeight="normal"
onClick={onOpen}
>
<Button size="sm" variant="ghost" colorScheme="red" fontWeight="normal" onClick={onOpen}>
<Icon as={BsTrash} boxSize={4} />
<Text display={{ base: "none", lg: "block" }} ml={2}>
Delete Experiment
</Text>
<Text ml={2}>Delete Experiment</Text>
</Button>
<AlertDialog isOpen={isOpen} leastDestructiveRef={cancelRef} onClose={onClose}>


@@ -37,7 +37,6 @@ export const FloatingLabelInput = ({
borderColor={isFocused ? "blue.500" : "gray.400"}
autoComplete="off"
value={value}
maxHeight={32}
overflowY="auto"
overflowX="hidden"
{...props}


@@ -0,0 +1,19 @@
import { type StackProps, VStack } from "@chakra-ui/react";
import { CellOptions } from "./CellOptions";
export const CellContent = ({
hardRefetch,
hardRefetching,
children,
...props
}: {
hardRefetch: () => void;
hardRefetching: boolean;
} & StackProps) => (
<VStack w="full" alignItems="flex-start" {...props}>
<CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
<VStack w="full" alignItems="flex-start" maxH={500} overflowY="auto">
{children}
</VStack>
</VStack>
);


@@ -1,4 +1,4 @@
import { Button, HStack, Icon, Tooltip } from "@chakra-ui/react";
import { Button, HStack, Icon, Spinner, Tooltip } from "@chakra-ui/react";
import { BsArrowClockwise } from "react-icons/bs";
import { useExperimentAccess } from "~/utils/hooks";
@@ -12,7 +12,7 @@ export const CellOptions = ({
const { canModify } = useExperimentAccess();
return (
<HStack justifyContent="flex-end" w="full">
{!refetchingOutput && canModify && (
{canModify && (
<Tooltip label="Refetch output" aria-label="refetch output">
<Button
size="xs"
@@ -28,7 +28,7 @@ export const CellOptions = ({
onClick={refetchOutput}
aria-label="refetch output"
>
<Icon as={BsArrowClockwise} boxSize={4} />
<Icon as={refetchingOutput ? Spinner : BsArrowClockwise} boxSize={4} />
</Button>
</Tooltip>
)}


@@ -1,16 +1,19 @@
import { api } from "~/utils/api";
import { type PromptVariant, type Scenario } from "../types";
import { Spinner, Text, Center, VStack } from "@chakra-ui/react";
import { Text, VStack } from "@chakra-ui/react";
import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
import SyntaxHighlighter from "react-syntax-highlighter";
import { docco } from "react-syntax-highlighter/dist/cjs/styles/hljs";
import stringify from "json-stringify-pretty-compact";
import { type ReactElement, useState, useEffect } from "react";
import { type ReactElement, useState, useEffect, Fragment } from "react";
import useSocket from "~/utils/useSocket";
import { OutputStats } from "./OutputStats";
import { ErrorHandler } from "./ErrorHandler";
import { CellOptions } from "./CellOptions";
import { RetryCountdown } from "./RetryCountdown";
import frontendModelProviders from "~/modelProviders/frontendModelProviders";
import { ResponseLog } from "./ResponseLog";
import { CellContent } from "./CellContent";
const WAITING_MESSAGE_INTERVAL = 20000;
export default function OutputCell({
scenario,
@@ -60,51 +63,97 @@ export default function OutputCell({
const awaitingOutput =
!cell ||
!cell.evalsComplete ||
cell.retrievalStatus === "PENDING" ||
cell.retrievalStatus === "IN_PROGRESS" ||
hardRefetching;
useEffect(() => setRefetchInterval(awaitingOutput ? 1000 : 0), [awaitingOutput]);
const modelOutput = cell?.modelOutput;
// TODO: disconnect from socket if we're not streaming anymore
const streamedMessage = useSocket<OutputSchema>(cell?.id);
if (!vars) return null;
if (disabledReason) return <Text color="gray.500">{disabledReason}</Text>;
if (awaitingOutput && !streamedMessage)
return (
<Center h="100%" w="100%">
<Spinner />
</Center>
);
if (!cell && !fetchingOutput)
return (
<VStack>
<CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
<CellContent hardRefetching={hardRefetching} hardRefetch={hardRefetch}>
<Text color="gray.500">Error retrieving output</Text>
</VStack>
</CellContent>
);
if (cell && cell.errorMessage) {
return (
<VStack>
<CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
<ErrorHandler cell={cell} refetchOutput={hardRefetch} />
</VStack>
<CellContent hardRefetching={hardRefetching} hardRefetch={hardRefetch}>
<Text color="red.500">{cell.errorMessage}</Text>
</CellContent>
);
}
const normalizedOutput = modelOutput
? provider.normalizeOutput(modelOutput.output)
if (disabledReason) return <Text color="gray.500">{disabledReason}</Text>;
const mostRecentResponse = cell?.modelResponses[cell.modelResponses.length - 1];
const showLogs = !streamedMessage && !mostRecentResponse?.output;
if (showLogs)
return (
<CellContent
hardRefetching={hardRefetching}
hardRefetch={hardRefetch}
alignItems="flex-start"
fontFamily="inconsolata, monospace"
spacing={0}
>
{cell?.jobQueuedAt && <ResponseLog time={cell.jobQueuedAt} title="Job queued" />}
{cell?.jobStartedAt && <ResponseLog time={cell.jobStartedAt} title="Job started" />}
{cell?.modelResponses?.map((response) => {
let numWaitingMessages = 0;
const relativeWaitingTime = response.receivedAt
? response.receivedAt.getTime()
: Date.now();
if (response.requestedAt) {
numWaitingMessages = Math.floor(
(relativeWaitingTime - response.requestedAt.getTime()) / WAITING_MESSAGE_INTERVAL,
);
}
return (
<Fragment key={response.id}>
{response.requestedAt && (
<ResponseLog time={response.requestedAt} title="Request sent to API" />
)}
{response.requestedAt &&
Array.from({ length: numWaitingMessages }, (_, i) => (
<ResponseLog
key={`waiting-${i}`}
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
time={new Date(response.requestedAt!.getTime() + i * WAITING_MESSAGE_INTERVAL)}
title="Waiting for response"
/>
))}
{response.receivedAt && (
<ResponseLog
time={response.receivedAt}
title="Response received from API"
message={`statusCode: ${response.statusCode ?? ""}\n ${
response.errorMessage ?? ""
}`}
/>
)}
</Fragment>
);
}) ?? null}
{mostRecentResponse?.retryTime && (
<RetryCountdown retryTime={mostRecentResponse.retryTime} />
)}
</CellContent>
);
const normalizedOutput = mostRecentResponse?.output
? provider.normalizeOutput(mostRecentResponse?.output)
: streamedMessage
? provider.normalizeOutput(streamedMessage)
: null;
if (modelOutput && normalizedOutput?.type === "json") {
if (mostRecentResponse?.output && normalizedOutput?.type === "json") {
return (
<VStack
w="100%"
@@ -114,8 +163,13 @@ export default function OutputCell({
overflowX="hidden"
justifyContent="space-between"
>
<VStack w="full" flex={1} spacing={0}>
<CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
<CellContent
hardRefetching={hardRefetching}
hardRefetch={hardRefetch}
w="full"
flex={1}
spacing={0}
>
<SyntaxHighlighter
customStyle={{ overflowX: "unset", width: "100%", flex: 1 }}
language="json"
@@ -127,8 +181,8 @@ export default function OutputCell({
>
{stringify(normalizedOutput.value, { maxLength: 40 })}
</SyntaxHighlighter>
</VStack>
<OutputStats modelOutput={modelOutput} scenario={scenario} />
</CellContent>
<OutputStats modelResponse={mostRecentResponse} scenario={scenario} />
</VStack>
);
}
@@ -138,10 +192,13 @@ export default function OutputCell({
return (
<VStack w="100%" h="100%" justifyContent="space-between" whiteSpace="pre-wrap">
<VStack w="full" alignItems="flex-start" spacing={0}>
<CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
<Text>{contentToDisplay}</Text>
<CellContent hardRefetching={hardRefetching} hardRefetch={hardRefetch}>
<Text>{contentToDisplay}</Text>
</CellContent>
</VStack>
{modelOutput && <OutputStats modelOutput={modelOutput} scenario={scenario} />}
{mostRecentResponse?.output && (
<OutputStats modelResponse={mostRecentResponse} scenario={scenario} />
)}
</VStack>
);
}


@@ -7,28 +7,32 @@ import { CostTooltip } from "~/components/tooltip/CostTooltip";
const SHOW_TIME = true;
export const OutputStats = ({
modelOutput,
modelResponse,
}: {
modelOutput: NonNullable<
NonNullable<RouterOutputs["scenarioVariantCells"]["get"]>["modelOutput"]
modelResponse: NonNullable<
NonNullable<RouterOutputs["scenarioVariantCells"]["get"]>["modelResponses"][0]
>;
scenario: Scenario;
}) => {
const timeToComplete = modelOutput.timeToComplete;
const timeToComplete =
modelResponse.receivedAt && modelResponse.requestedAt
? modelResponse.receivedAt.getTime() - modelResponse.requestedAt.getTime()
: 0;
const promptTokens = modelOutput.promptTokens;
const completionTokens = modelOutput.completionTokens;
const promptTokens = modelResponse.promptTokens;
const completionTokens = modelResponse.completionTokens;
return (
<HStack w="full" align="center" color="gray.500" fontSize="2xs" mt={{ base: 0, md: 1 }}>
<HStack flex={1}>
{modelOutput.outputEvaluations.map((evaluation) => {
{modelResponse.outputEvaluations.map((evaluation) => {
const passed = evaluation.result > 0.5;
return (
<Tooltip
isDisabled={!evaluation.details}
label={evaluation.details}
key={evaluation.id}
shouldWrapChildren
>
<HStack spacing={0}>
<Text>{evaluation.evaluation.label}</Text>
@@ -42,15 +46,15 @@ export const OutputStats = ({
);
})}
</HStack>
{modelOutput.cost && (
{modelResponse.cost && (
<CostTooltip
promptTokens={promptTokens}
completionTokens={completionTokens}
cost={modelOutput.cost}
cost={modelResponse.cost}
>
<HStack spacing={0}>
<Icon as={BsCurrencyDollar} />
<Text mr={1}>{modelOutput.cost.toFixed(3)}</Text>
<Text mr={1}>{modelResponse.cost.toFixed(3)}</Text>
</HStack>
</CostTooltip>
)}


@@ -0,0 +1,22 @@
import { HStack, VStack, Text } from "@chakra-ui/react";
import dayjs from "dayjs";
export const ResponseLog = ({
time,
title,
message,
}: {
time: Date;
title: string;
message?: string;
}) => {
return (
<VStack spacing={0} alignItems="flex-start">
<HStack>
<Text>{dayjs(time).format("HH:mm:ss")}</Text>
<Text>{title}</Text>
</HStack>
{message && <Text pl={4}>{message}</Text>}
</VStack>
);
};
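
A representative usage of the new ResponseLog row, mirroring the OutputCell log view above (the time and message values are illustrative):

```tsx
import { ResponseLog } from "./ResponseLog";

export const ExampleLog = () => (
  <ResponseLog
    time={new Date()} // rendered as HH:mm:ss via dayjs
    title="Response received from API"
    message="statusCode: 429" // optional detail line shown indented below the header
  />
);
```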


@@ -1,21 +1,12 @@
import { type ScenarioVariantCell } from "@prisma/client";
import { VStack, Text } from "@chakra-ui/react";
import { Text } from "@chakra-ui/react";
import { useEffect, useState } from "react";
import pluralize from "pluralize";
export const ErrorHandler = ({
cell,
refetchOutput,
}: {
cell: ScenarioVariantCell;
refetchOutput: () => void;
}) => {
export const RetryCountdown = ({ retryTime }: { retryTime: Date }) => {
const [msToWait, setMsToWait] = useState(0);
useEffect(() => {
if (!cell.retryTime) return;
const initialWaitTime = cell.retryTime.getTime() - Date.now();
const initialWaitTime = retryTime.getTime() - Date.now();
const msModuloOneSecond = initialWaitTime % 1000;
let remainingTime = initialWaitTime - msModuloOneSecond;
setMsToWait(remainingTime);
@@ -36,18 +27,13 @@ export const ErrorHandler = ({
clearInterval(interval);
clearTimeout(timeout);
};
}, [cell.retryTime, cell.statusCode, setMsToWait, refetchOutput]);
}, [retryTime]);
if (msToWait <= 0) return null;
return (
<VStack w="full">
<Text color="red.600" wordBreak="break-word">
{cell.errorMessage}
</Text>
{msToWait > 0 && (
<Text color="red.600" fontSize="sm">
Retrying in {pluralize("second", Math.ceil(msToWait / 1000), true)}...
</Text>
)}
</VStack>
<Text color="red.600" fontSize="sm">
Retrying in {pluralize("second", Math.ceil(msToWait / 1000), true)}...
</Text>
);
};


@@ -1,15 +1,27 @@
import { type DragEvent } from "react";
import { useEffect, type DragEvent } from "react";
import { api } from "~/utils/api";
import { isEqual } from "lodash-es";
import { type Scenario } from "./types";
import { useExperiment, useExperimentAccess, useHandledAsyncCallback } from "~/utils/hooks";
import { useState } from "react";
import { Box, Button, Flex, HStack, Icon, Spinner, Stack, Tooltip, VStack } from "@chakra-ui/react";
import {
Box,
Button,
HStack,
Icon,
IconButton,
Spinner,
Stack,
Tooltip,
VStack,
Text,
} from "@chakra-ui/react";
import { cellPadding } from "../constants";
import { BsX } from "react-icons/bs";
import { BsArrowsAngleExpand, BsX } from "react-icons/bs";
import { RiDraggable } from "react-icons/ri";
import { FloatingLabelInput } from "./FloatingLabelInput";
import { ScenarioEditorModal } from "./ScenarioEditorModal";
export default function ScenarioEditor({
scenario,
@@ -28,6 +40,10 @@ export default function ScenarioEditor({
const [values, setValues] = useState<Record<string, string>>(savedValues);
useEffect(() => {
if (savedValues) setValues(savedValues);
}, [savedValues]);
const experiment = useExperiment();
const vars = api.templateVars.list.useQuery({ experimentId: experiment.data?.id ?? "" });
@@ -71,83 +87,98 @@ export default function ScenarioEditor({
[reorderMutation, scenario.id],
);
return (
<HStack
alignItems="flex-start"
px={cellPadding.x}
py={cellPadding.y}
spacing={0}
height="100%"
draggable={!variableInputHovered}
onDragStart={(e) => {
e.dataTransfer.setData("text/plain", scenario.id);
e.currentTarget.style.opacity = "0.4";
}}
onDragEnd={(e) => {
e.currentTarget.style.opacity = "1";
}}
onDragOver={(e) => {
e.preventDefault();
setIsDragTarget(true);
}}
onDragLeave={() => {
setIsDragTarget(false);
}}
onDrop={onReorder}
backgroundColor={isDragTarget ? "gray.100" : "transparent"}
>
{canModify && props.canHide && (
<Stack
alignSelf="flex-start"
opacity={props.hovered ? 1 : 0}
spacing={0}
ml={-cellPadding.x}
>
<Tooltip label="Hide scenario" hasArrow>
{/* for some reason the tooltip can't position itself properly relative to the icon without the wrapping box */}
<Button
variant="unstyled"
color="gray.400"
height="unset"
width="unset"
minW="unset"
onClick={onHide}
_hover={{
color: "gray.800",
cursor: "pointer",
}}
>
<Icon as={hidingInProgress ? Spinner : BsX} boxSize={hidingInProgress ? 4 : 6} />
</Button>
</Tooltip>
<Icon
as={RiDraggable}
boxSize={6}
color="gray.400"
_hover={{ color: "gray.800", cursor: "pointer" }}
/>
</Stack>
)}
const [scenarioEditorModalOpen, setScenarioEditorModalOpen] = useState(false);
{variableLabels.length === 0 ? (
<Box color="gray.500">{vars.data ? "No scenario variables configured" : "Loading..."}</Box>
) : (
<VStack spacing={4} flex={1} py={2}>
{variableLabels.map((key) => {
const value = values[key] ?? "";
const layoutDirection = value.length > 20 ? "column" : "row";
return (
<Flex
key={key}
direction={layoutDirection}
alignItems={layoutDirection === "column" ? "flex-start" : "center"}
flexWrap="wrap"
width="full"
return (
<>
<HStack
alignItems="flex-start"
px={cellPadding.x}
py={cellPadding.y}
spacing={0}
height="100%"
draggable={!variableInputHovered}
onDragStart={(e) => {
e.dataTransfer.setData("text/plain", scenario.id);
e.currentTarget.style.opacity = "0.4";
}}
onDragEnd={(e) => {
e.currentTarget.style.opacity = "1";
}}
onDragOver={(e) => {
e.preventDefault();
setIsDragTarget(true);
}}
onDragLeave={() => {
setIsDragTarget(false);
}}
onDrop={onReorder}
backgroundColor={isDragTarget ? "gray.100" : "transparent"}
>
{canModify && props.canHide && (
<Stack
alignSelf="flex-start"
opacity={props.hovered ? 1 : 0}
spacing={0}
ml={-cellPadding.x}
>
<Tooltip label="Hide scenario" hasArrow>
{/* for some reason the tooltip can't position itself properly relative to the icon without the wrapping box */}
<Button
variant="unstyled"
color="gray.400"
height="unset"
width="unset"
minW="unset"
onClick={onHide}
_hover={{
color: "gray.800",
cursor: "pointer",
}}
>
<Icon as={hidingInProgress ? Spinner : BsX} boxSize={hidingInProgress ? 4 : 6} />
</Button>
</Tooltip>
<Icon
as={RiDraggable}
boxSize={6}
color="gray.400"
_hover={{ color: "gray.800", cursor: "pointer" }}
/>
</Stack>
)}
{variableLabels.length === 0 ? (
<Box color="gray.500">
{vars.data ? "No scenario variables configured" : "Loading..."}
</Box>
) : (
<VStack spacing={4} flex={1} py={2}>
<HStack justifyContent="space-between" w="100%">
<Text color="gray.500">Scenario</Text>
<IconButton
className="fullscreen-toggle"
aria-label="Maximize"
icon={<BsArrowsAngleExpand />}
onClick={() => setScenarioEditorModalOpen(true)}
boxSize={6}
borderRadius={4}
p={1.5}
minW={0}
colorScheme="gray"
color="gray.500"
variant="ghost"
/>
</HStack>
{variableLabels.map((key) => {
const value = values[key] ?? "";
return (
<FloatingLabelInput
key={key}
label={key}
isDisabled={!canModify}
style={{ width: "100%" }}
maxHeight={32}
value={value}
onChange={(e) => {
setValues((prev) => ({ ...prev, [key]: e.target.value }));
@@ -162,27 +193,34 @@ export default function ScenarioEditor({
onMouseEnter={() => setVariableInputHovered(true)}
onMouseLeave={() => setVariableInputHovered(false)}
/>
</Flex>
);
})}
{hasChanged && (
<HStack justify="right">
<Button
size="sm"
onMouseDown={() => {
setValues(savedValues);
}}
colorScheme="gray"
>
Reset
</Button>
<Button size="sm" onMouseDown={onSave} colorScheme="blue">
Save
</Button>
</HStack>
)}
</VStack>
);
})}
{hasChanged && (
<HStack justify="right">
<Button
size="sm"
onMouseDown={() => {
setValues(savedValues);
}}
colorScheme="gray"
>
Reset
</Button>
<Button size="sm" onMouseDown={onSave} colorScheme="blue">
Save
</Button>
</HStack>
)}
</VStack>
)}
</HStack>
{scenarioEditorModalOpen && (
<ScenarioEditorModal
scenarioId={scenario.id}
initialValues={savedValues}
onClose={() => setScenarioEditorModalOpen(false)}
/>
)}
</HStack>
</>
);
}


@@ -0,0 +1,132 @@
import {
Button,
HStack,
Icon,
Modal,
ModalBody,
ModalCloseButton,
ModalContent,
ModalFooter,
ModalHeader,
ModalOverlay,
Spinner,
Text,
VStack,
} from "@chakra-ui/react";
import { useEffect, useState } from "react";
import { BsFileTextFill } from "react-icons/bs";
import { isEqual } from "lodash-es";
import { api } from "~/utils/api";
import {
useScenario,
useHandledAsyncCallback,
useExperiment,
useExperimentAccess,
} from "~/utils/hooks";
import { FloatingLabelInput } from "./FloatingLabelInput";
export const ScenarioEditorModal = ({
scenarioId,
initialValues,
onClose,
}: {
scenarioId: string;
initialValues: Record<string, string>;
onClose: () => void;
}) => {
const utils = api.useContext();
const experiment = useExperiment();
const { canModify } = useExperimentAccess();
const scenario = useScenario(scenarioId);
const savedValues = scenario.data?.variableValues as Record<string, string>;
const [values, setValues] = useState<Record<string, string>>(initialValues);
useEffect(() => {
if (savedValues) setValues(savedValues);
}, [savedValues]);
const hasChanged = !isEqual(savedValues, values);
const mutation = api.scenarios.replaceWithValues.useMutation();
const [onSave, saving] = useHandledAsyncCallback(async () => {
await mutation.mutateAsync({
id: scenarioId,
values,
});
await utils.scenarios.list.invalidate();
}, [mutation, values]);
console.log("scenario", scenario);
const vars = api.templateVars.list.useQuery({ experimentId: experiment.data?.id ?? "" });
const variableLabels = vars.data?.map((v) => v.label) ?? [];
return (
<Modal
isOpen
onClose={onClose}
size={{ base: "xl", sm: "2xl", md: "3xl", lg: "5xl", xl: "7xl" }}
>
<ModalOverlay />
<ModalContent w={1200}>
<ModalHeader>
<HStack>
<Icon as={BsFileTextFill} />
<Text>Scenario</Text>
</HStack>
</ModalHeader>
<ModalCloseButton />
<ModalBody maxW="unset">
<VStack spacing={8}>
{values &&
variableLabels.map((key) => {
const value = values[key] ?? "";
return (
<FloatingLabelInput
key={key}
label={key}
isDisabled={!canModify}
_disabled={{ opacity: 1 }}
style={{ width: "100%" }}
value={value}
onChange={(e) => {
setValues((prev) => ({ ...prev, [key]: e.target.value }));
}}
onKeyDown={(e) => {
if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) {
e.preventDefault();
e.currentTarget.blur();
onSave();
}
}}
/>
);
})}
</VStack>
</ModalBody>
<ModalFooter>
{canModify && (
<HStack>
<Button
colorScheme="gray"
onClick={() => setValues(savedValues)}
minW={24}
isDisabled={!hasChanged}
>
<Text>Reset</Text>
</Button>
<Button colorScheme="blue" onClick={onSave} minW={24} isDisabled={!hasChanged}>
{saving ? <Spinner boxSize={4} /> : <Text>Save</Text>}
</Button>
</HStack>
)}
</ModalFooter>
</ModalContent>
</Modal>
);
};


@@ -21,17 +21,14 @@ export default function VariantStats(props: { variant: PromptVariant }) {
completionTokens: 0,
scenarioCount: 0,
outputCount: 0,
awaitingRetrievals: false,
awaitingEvals: false,
},
refetchInterval,
},
);
// Poll every two seconds while we are waiting for LLM retrievals to finish
useEffect(
() => setRefetchInterval(data.awaitingRetrievals ? 2000 : 0),
[data.awaitingRetrievals],
);
useEffect(() => setRefetchInterval(data.awaitingEvals ? 5000 : 0), [data.awaitingEvals]);
const [passColor, neutralColor, failColor] = useToken("colors", [
"green.500",
@@ -51,12 +48,12 @@ export default function VariantStats(props: { variant: PromptVariant }) {
fontSize="xs"
py={cellPadding.y}
>
{showNumFinished && (
<Text>
{data.outputCount} / {data.scenarioCount}
</Text>
)}
<HStack px={cellPadding.x}>
{showNumFinished && (
<Text>
{data.outputCount} / {data.scenarioCount}
</Text>
)}
{data.evalResults.map((result) => {
const passedFrac = result.passCount / result.totalCount;
return (
@@ -69,7 +66,7 @@ export default function VariantStats(props: { variant: PromptVariant }) {
);
})}
</HStack>
{data.overallCost && !data.awaitingRetrievals && (
{data.overallCost && (
<CostTooltip
promptTokens={data.promptTokens}
completionTokens={data.completionTokens}


@@ -33,7 +33,7 @@ export default function OutputsTable({ experimentId }: { experimentId: string |
<Grid
pt={4}
pb={24}
pl={4}
pl={8}
display="grid"
gridTemplateColumns={`250px repeat(${variants.data.length}, minmax(300px, 1fr)) auto`}
sx={{
@@ -53,6 +53,7 @@ export default function OutputsTable({ experimentId }: { experimentId: string |
colStart: i + 2,
borderLeftWidth: i === 0 ? 1 : 0,
marginLeft: i === 0 ? "-1px" : 0,
backgroundColor: "gray.100",
};
return (
<Fragment key={variant.uiId}>


@@ -1,11 +1,4 @@
import { type GridItemProps, type SystemStyleObject } from "@chakra-ui/react";
export const stickyHeaderStyle: SystemStyleObject = {
position: "sticky",
top: "0",
backgroundColor: "#fff",
zIndex: 10,
};
import { type GridItemProps } from "@chakra-ui/react";
export const borders: GridItemProps = {
borderRightWidth: 1,


@@ -6,7 +6,6 @@ import { useExperimentAccess, useHandledAsyncCallback } from "~/utils/hooks";
import { HStack, Icon, Text, GridItem, type GridItemProps } from "@chakra-ui/react"; // Changed here
import { cellPadding, headerMinHeight } from "../constants";
import AutoResizeTextArea from "../AutoResizeTextArea";
import { stickyHeaderStyle } from "../OutputsTable/styles";
import VariantHeaderMenuButton from "./VariantHeaderMenuButton";
export default function VariantHeader(
@@ -53,7 +52,17 @@ export default function VariantHeader(
if (!canModify) {
return (
<GridItem padding={0} sx={stickyHeaderStyle} borderTopWidth={1} {...gridItemProps}>
<GridItem
padding={0}
sx={{
position: "sticky",
top: "0",
// Ensure that the menu always appears above the sticky header of other variants
zIndex: menuOpen ? "dropdown" : 10,
}}
borderTopWidth={1}
{...gridItemProps}
>
<Text fontSize={16} fontWeight="bold" px={cellPadding.x} py={cellPadding.y}>
{variant.label}
</Text>
@@ -65,15 +74,16 @@ export default function VariantHeader(
<GridItem
padding={0}
sx={{
...stickyHeaderStyle,
position: "sticky",
top: "0",
// Ensure that the menu always appears above the sticky header of other variants
zIndex: menuOpen ? "dropdown" : stickyHeaderStyle.zIndex,
zIndex: menuOpen ? "dropdown" : 10,
}}
borderTopWidth={1}
{...gridItemProps}
>
<HStack
spacing={4}
spacing={2}
alignItems="flex-start"
minH={headerMinHeight}
draggable={!isInputHovered}
@@ -92,7 +102,8 @@ export default function VariantHeader(
setIsDragTarget(false);
}}
onDrop={onReorder}
backgroundColor={isDragTarget ? "gray.100" : "transparent"}
backgroundColor={isDragTarget ? "gray.200" : "gray.100"}
h="full"
>
<Icon
as={RiDraggable}


@@ -24,7 +24,7 @@ export const HeaderButtons = () => {
colorScheme={canModify ? undefined : "orange"}
bgColor={canModify ? undefined : "orange.400"}
minW={0}
variant={canModify ? "ghost" : "solid"}
variant={{ base: "solid", md: canModify ? "ghost" : "solid" }}
>
{isForking ? <Spinner boxSize={5} /> : <Icon as={TbGitFork} boxSize={5} />}
<Text ml={2}>Fork</Text>


@@ -84,7 +84,11 @@ const NavSidebar = () => {
/>
)}
</VStack>
{user ? <UserMenu user={user} /> : <Divider />}
{user ? (
<UserMenu user={user} borderColor={"gray.200"} borderTopWidth={1} borderBottomWidth={1} />
) : (
<Divider />
)}
<VStack spacing={0} align="center">
<Link
href="https://github.com/openpipe/openpipe"


@@ -8,12 +8,16 @@ import {
PopoverTrigger,
PopoverContent,
Link,
useColorMode,
type StackProps,
} from "@chakra-ui/react";
import { type Session } from "next-auth";
import { signOut } from "next-auth/react";
import { BsBoxArrowRight, BsChevronRight, BsPersonCircle } from "react-icons/bs";
export default function UserMenu({ user }: { user: Session }) {
export default function UserMenu({ user, ...rest }: { user: Session } & StackProps) {
const { colorMode } = useColorMode();
const profileImage = user.user.image ? (
<Image src={user.user.image} alt="profile picture" boxSize={8} borderRadius="50%" />
) : (
@@ -29,12 +33,10 @@ export default function UserMenu({ user }: { user: Session }) {
px={3}
spacing={3}
py={2}
borderColor={"gray.200"}
borderTopWidth={1}
borderBottomWidth={1}
{...rest}
cursor="pointer"
_hover={{
bgColor: "gray.200",
bgColor: colorMode === "light" ? "gray.200" : "gray.700",
}}
>
{profileImage}


@@ -18,6 +18,7 @@ export const env = createEnv({
GITHUB_CLIENT_SECRET: z.string().min(1),
OPENAI_API_KEY: z.string().min(1),
REPLICATE_API_TOKEN: z.string().default("placeholder"),
ANTHROPIC_API_KEY: z.string().default("placeholder"),
},
/**
@@ -44,6 +45,7 @@ export const env = createEnv({
GITHUB_CLIENT_ID: process.env.GITHUB_CLIENT_ID,
GITHUB_CLIENT_SECRET: process.env.GITHUB_CLIENT_SECRET,
REPLICATE_API_TOKEN: process.env.REPLICATE_API_TOKEN,
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
},
/**
* Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation.


@@ -0,0 +1,69 @@
/* eslint-disable @typescript-eslint/no-var-requires */
import YAML from "yaml";
import fs from "fs";
import path from "path";
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
import $RefParser from "@apidevtools/json-schema-ref-parser";
import { type JSONObject } from "superjson/dist/types";
import assert from "assert";
import { type JSONSchema4Object } from "json-schema";
import { isObject } from "lodash-es";
// @ts-expect-error for some reason missing from types
import parserEstree from "prettier/plugins/estree";
import parserBabel from "prettier/plugins/babel";
import prettier from "prettier/standalone";
const OPENAPI_URL =
"https://raw.githubusercontent.com/tryAGI/Anthropic/1c0871e861de60a4c3a843cb90e17d63e86c234a/docs/openapi.yaml";
// Fetch the openapi document
const response = await fetch(OPENAPI_URL);
const openApiYaml = await response.text();
// Parse the yaml document
let schema = YAML.parse(openApiYaml) as JSONObject;
schema = openapiSchemaToJsonSchema(schema);
const jsonSchema = await $RefParser.dereference(schema);
assert("components" in jsonSchema);
const completionRequestSchema = jsonSchema.components.schemas
.CreateCompletionRequest as JSONSchema4Object;
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
assert(
"properties" in completionRequestSchema &&
isObject(completionRequestSchema.properties) &&
"model" in completionRequestSchema.properties &&
isObject(completionRequestSchema.properties.model),
);
const modelProperty = completionRequestSchema.properties.model;
assert(
"oneOf" in modelProperty &&
Array.isArray(modelProperty.oneOf) &&
modelProperty.oneOf.length === 2 &&
isObject(modelProperty.oneOf[1]) &&
"enum" in modelProperty.oneOf[1],
"Expected model to have oneOf length of 2",
);
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
delete modelProperty["oneOf"];
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "input.schema.json"),
await prettier.format(JSON.stringify(completionRequestSchema, null, 2), {
parser: "json",
plugins: [parserBabel, parserEstree],
}),
);
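Concretely, the surgery above flattens the model property from a union into a plain enum. An illustrative before/after (member lists abridged):

// Before, as published in the upstream OpenAPI document:
//   model: { oneOf: [{ type: "string" }, { type: "string", enum: ["claude-2", ...] }] }
// After the transformation, which is what Monaco receives:
//   model: { type: "string", enum: ["claude-2", ...] }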


@@ -0,0 +1,129 @@
{
"type": "object",
"properties": {
"model": {
"description": "The model that will complete your prompt.\nAs we improve Claude, we develop new versions of it that you can query.\nThis parameter controls which version of Claude answers your request.\nRight now we are offering two model families: Claude, and Claude Instant.\nYou can use them by setting model to \"claude-2\" or \"claude-instant-1\", respectively.\nSee models for additional details.\n",
"x-oaiTypeLabel": "string",
"type": "string",
"enum": [
"claude-2",
"claude-2.0",
"claude-instant-1",
"claude-instant-1.1"
]
},
"prompt": {
"description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\\n\\nHuman: ${userQuestion}\\n\\nAssistant:\nSee our comments on prompts for more context.\n",
"default": "<|endoftext|>",
"nullable": true,
"oneOf": [
{
"type": "string",
"default": "",
"example": "This is a test."
},
{
"type": "array",
"items": {
"type": "string",
"default": "",
"example": "This is a test."
}
},
{
"type": "array",
"minItems": 1,
"items": {
"type": "integer"
},
"example": "[1212, 318, 257, 1332, 13]"
},
{
"type": "array",
"minItems": 1,
"items": {
"type": "array",
"minItems": 1,
"items": {
"type": "integer"
}
},
"example": "[[1212, 318, 257, 1332, 13]]"
}
]
},
"max_tokens_to_sample": {
"type": "integer",
"minimum": 1,
"default": 256,
"example": 256,
"nullable": true,
"description": "The maximum number of tokens to generate before stopping.\n\nNote that our models may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate.\n"
},
"temperature": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
"example": 1,
"nullable": true,
"description": "Amount of randomness injected into the response.\n\nDefaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks.\n"
},
"top_p": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
"example": 1,
"nullable": true,
"description": "Use nucleus sampling.\n\nIn nucleus sampling, we compute the cumulative distribution over all the options \nfor each subsequent token in decreasing probability order and cut it off once \nit reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both.\n"
},
"top_k": {
"type": "number",
"minimum": 0,
"default": 5,
"example": 5,
"nullable": true,
"description": "Only sample from the top K options for each subsequent token.\n\nUsed to remove \"long tail\" low probability responses. Learn more technical details here.\n"
},
"stream": {
"description": "Whether to incrementally stream the response using server-sent events.\nSee this guide to SSE events for details.type: boolean\n",
"nullable": true,
"default": false
},
"stop_sequences": {
"description": "Sequences that will cause the model to stop generating completion text.\nOur models stop on \"\\n\\nHuman:\", and may include additional built-in stop sequences in the future. By providing the stop_sequences parameter, you may include additional strings that will cause the model to stop generating.\n",
"default": null,
"nullable": true,
"oneOf": [
{
"type": "string",
"default": "<|endoftext|>",
"example": "\n",
"nullable": true
},
{
"type": "array",
"minItems": 1,
"maxItems": 4,
"items": {
"type": "string",
"example": "[\"\\n\"]"
}
}
]
},
"metadata": {
"type": "object",
"properties": {
"user_id": {
"type": "string",
"example": "13803d75-b4b5-4c3e-b2a2-6f21399b021b",
"description": "An external identifier for the user who is associated with the request.\n\nThis should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. \nDo not include any identifying information such as name, email address, or phone number.\n"
}
},
"description": "An object describing metadata about the request.\n"
}
},
"required": ["model", "prompt", "max_tokens_to_sample"]
}
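For reference, a minimal TypeScript value that satisfies the schema's required fields (values are illustrative only):

import { type CompletionCreateParams } from "@anthropic-ai/sdk/resources";

// model, prompt, and max_tokens_to_sample are the only required fields.
const exampleParams: CompletionCreateParams = {
  model: "claude-2.0",
  prompt: "\n\nHuman: Say hello.\n\nAssistant:",
  max_tokens_to_sample: 256,
};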


@@ -0,0 +1,40 @@
import { type Completion } from "@anthropic-ai/sdk/resources";
import { type SupportedModel } from ".";
import { type FrontendModelProvider } from "../types";
import { refinementActions } from "./refinementActions";
const frontendModelProvider: FrontendModelProvider<SupportedModel, Completion> = {
name: "Replicate Llama2",
models: {
"claude-2.0": {
name: "Claude 2.0",
contextWindow: 100000,
promptTokenPrice: 11.02 / 1000000,
completionTokenPrice: 32.68 / 1000000,
speed: "medium",
provider: "anthropic",
learnMoreUrl: "https://www.anthropic.com/product",
},
"claude-instant-1.1": {
name: "Claude Instant 1.1",
contextWindow: 100000,
promptTokenPrice: 1.63 / 1000000,
completionTokenPrice: 5.51 / 1000000,
speed: "fast",
provider: "anthropic",
learnMoreUrl: "https://www.anthropic.com/product",
},
},
refinementActions,
normalizeOutput: (output) => {
return {
type: "text",
value: output.completion,
};
},
};
export default frontendModelProvider;
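The prices above are dollars per token. A sketch of how a response's cost would be derived from them (an assumption; it mirrors the cost fields stored on model responses elsewhere in this diff):

// Hypothetical helper: dollar cost of a single response.
const calculateCost = (
  model: { promptTokenPrice: number; completionTokenPrice: number },
  promptTokens: number,
  completionTokens: number,
) => promptTokens * model.promptTokenPrice + completionTokens * model.completionTokenPrice;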


@@ -0,0 +1,86 @@
import { env } from "~/env.mjs";
import { type CompletionResponse } from "../types";
import Anthropic, { APIError } from "@anthropic-ai/sdk";
import { type Completion, type CompletionCreateParams } from "@anthropic-ai/sdk/resources";
import { isObject, isString } from "lodash-es";
const anthropic = new Anthropic({
apiKey: env.ANTHROPIC_API_KEY,
});
export async function getCompletion(
input: CompletionCreateParams,
onStream: ((partialOutput: Completion) => void) | null,
): Promise<CompletionResponse<Completion>> {
const start = Date.now();
let finalCompletion: Completion | null = null;
try {
if (onStream) {
const resp = await anthropic.completions.create(
{ ...input, stream: true },
{
maxRetries: 0,
},
);
for await (const part of resp) {
if (finalCompletion === null) {
finalCompletion = part;
} else {
finalCompletion = { ...part, completion: finalCompletion.completion + part.completion };
}
onStream(finalCompletion);
}
if (!finalCompletion) {
return {
type: "error",
message: "Streaming failed to return a completion",
autoRetry: false,
};
}
} else {
const resp = await anthropic.completions.create(
{ ...input, stream: false },
{
maxRetries: 0,
},
);
finalCompletion = resp;
}
const timeToComplete = Date.now() - start;
return {
type: "success",
statusCode: 200,
value: finalCompletion,
timeToComplete,
};
} catch (error: unknown) {
console.log("CAUGHT ERROR", error);
if (error instanceof APIError) {
const message =
isObject(error.error) &&
"error" in error.error &&
isObject(error.error.error) &&
"message" in error.error.error &&
isString(error.error.error.message)
? error.error.error.message
: error.message;
return {
type: "error",
message,
autoRetry: error.status === 429 || error.status === 503,
statusCode: error.status,
};
} else {
return {
type: "error",
message: (error as Error).message,
autoRetry: true,
};
}
}
}
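A minimal usage sketch (hypothetical caller; the streaming path is shown, where onStream receives the accumulated completion so far on each event):

// Hypothetical caller of the provider above.
const result = await getCompletion(
  { model: "claude-2.0", prompt: "\n\nHuman: Say hello.\n\nAssistant:", max_tokens_to_sample: 256 },
  (partial) => console.log(partial.completion), // accumulated text so far
);
if (result.type === "success") {
  console.log("final:", result.value.completion, `took ${result.timeToComplete}ms`);
}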


@@ -0,0 +1,34 @@
import { type JSONSchema4 } from "json-schema";
import { type ModelProvider } from "../types";
import inputSchema from "./codegen/input.schema.json";
import { getCompletion } from "./getCompletion";
import frontendModelProvider from "./frontend";
import type { Completion, CompletionCreateParams } from "@anthropic-ai/sdk/resources";
const supportedModels = ["claude-2.0", "claude-instant-1.1"] as const;
export type SupportedModel = (typeof supportedModels)[number];
export type AnthropicProvider = ModelProvider<SupportedModel, CompletionCreateParams, Completion>;
const modelProvider: AnthropicProvider = {
getModel: (input) => {
if (supportedModels.includes(input.model as SupportedModel))
return input.model as SupportedModel;
const modelMaps: Record<string, SupportedModel> = {
"claude-2": "claude-2.0",
"claude-instant-1": "claude-instant-1.1",
};
if (input.model in modelMaps) return modelMaps[input.model] as SupportedModel;
return null;
},
inputSchema: inputSchema as JSONSchema4,
canStream: true,
getCompletion,
...frontendModelProvider,
};
export default modelProvider;
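The getModel function pins un-versioned aliases to concrete model versions. Illustrative resolutions:

// "claude-2"          -> "claude-2.0"
// "claude-instant-1"  -> "claude-instant-1.1"
// "claude-2.0"        -> "claude-2.0" (already a supported version)
// any other model id  -> null (not handled by this provider)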


@@ -0,0 +1,3 @@
import { type RefinementAction } from "../types";
export const refinementActions: Record<string, RefinementAction> = {};


@@ -1,15 +1,15 @@
import openaiChatCompletionFrontend from "./openai-ChatCompletion/frontend";
import replicateLlama2Frontend from "./replicate-llama2/frontend";
import anthropicFrontend from "./anthropic/frontend";
import { type SupportedProvider, type FrontendModelProvider } from "./types";
// TODO: make sure we get a typescript error if you forget to add a provider here
// Keep attributes here that need to be accessible from the frontend. We can't
// just include them in the default `modelProviders` object because it has some
// transient dependencies that can only be imported on the server.
const frontendModelProviders: Record<SupportedProvider, FrontendModelProvider<any, any>> = {
"openai/ChatCompletion": openaiChatCompletionFrontend,
"replicate/llama2": replicateLlama2Frontend,
anthropic: anthropicFrontend,
};
export default frontendModelProviders;


@@ -1,10 +1,12 @@
import openaiChatCompletion from "./openai-ChatCompletion";
import replicateLlama2 from "./replicate-llama2";
import anthropic from "./anthropic";
import { type SupportedProvider, type ModelProvider } from "./types";
const modelProviders: Record<SupportedProvider, ModelProvider<any, any, any>> = {
"openai/ChatCompletion": openaiChatCompletion,
"replicate/llama2": replicateLlama2,
anthropic,
};
export default modelProviders;


@@ -120,7 +120,6 @@ export async function getCompletion(
cost,
};
} catch (error: unknown) {
console.error("ERROR IS", error);
if (error instanceof APIError) {
return {
type: "error",


@@ -10,7 +10,7 @@ const replicate = new Replicate({
const modelIds: Record<ReplicateLlama2Input["model"], string> = {
"7b-chat": "5ec5fdadd80ace49f5a2b2178cceeb9f2f77c493b85b1131002c26e6b2b13184",
"13b-chat": "6b4da803a2382c08868c5af10a523892f38e2de1aafb2ee55b020d9efef2fdb8",
"70b-chat": "2d19859030ff705a87c746f7e96eea03aefb71f166725aee39692f1476566d48",
"70b-chat": "2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
};
export async function getCompletion(


@@ -6,6 +6,7 @@ import { z } from "zod";
export const ZodSupportedProvider = z.union([
z.literal("openai/ChatCompletion"),
z.literal("replicate/llama2"),
z.literal("anthropic"),
]);
export type SupportedProvider = z.infer<typeof ZodSupportedProvider>;


@@ -21,6 +21,17 @@ const MyApp: AppType<{ session: Session | null }> = ({
name="viewport"
content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=0"
/>
<meta name="og:title" content="OpenPipe: Open-Source Lab for LLMs" key="title" />
<meta
name="og:description"
content="OpenPipe is a powerful playground for quickly optimizing performance, cost, and speed across models."
key="description"
/>
<meta name="og:image" content="/og.png" key="og-image" />
<meta property="og:image:height" content="630" />
<meta property="og:image:width" content="1200" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:image" content="/og.png" />
</Head>
<SessionProvider session={session}>
<SyncAppStore />


@@ -0,0 +1,81 @@
import { ImageResponse } from "@vercel/og";
import { type NextApiRequest, type NextApiResponse } from "next";
export const config = {
runtime: "experimental-edge",
};
const inconsolataRegularFontP = fetch(
new URL("../../../../public/fonts/Inconsolata_SemiExpanded-Medium.ttf", import.meta.url),
).then((res) => res.arrayBuffer());
const OgImage = async (req: NextApiRequest, res: NextApiResponse) => {
// @ts-expect-error - nextUrl is not defined on NextApiRequest for some reason
const searchParams = req.nextUrl?.searchParams as URLSearchParams;
const experimentLabel = searchParams.get("experimentLabel");
const variantsCount = searchParams.get("variantsCount");
const scenariosCount = searchParams.get("scenariosCount");
const inconsolataRegularFont = await inconsolataRegularFontP;
return new ImageResponse(
(
<div
style={{
width: "100%",
height: "100%",
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
fontSize: 48,
padding: "48px",
background: "white",
position: "relative",
}}
>
<div
style={{
position: "absolute",
top: 0,
left: 0,
display: "flex",
alignItems: "center",
padding: 48,
}}
>
{/* eslint-disable-next-line @next/next/no-img-element */}
<img
src="https://app.openpipe.ai/logo.svg"
alt="OpenPipe Logo"
height={100}
width={120}
/>
<div style={{ marginLeft: 24, fontSize: 64, fontFamily: "Inconsolata" }}>OpenPipe</div>
</div>
<div style={{ display: "flex", fontSize: 72, marginTop: 108 }}>{experimentLabel}</div>
<div style={{ display: "flex", flexDirection: "column", marginTop: 36 }}>
<div style={{ display: "flex" }}>
<span style={{ width: 320 }}>Variants:</span> {variantsCount}
</div>
<div style={{ display: "flex", marginTop: 24 }}>
<span style={{ width: 320 }}>Scenarios:</span> {scenariosCount}
</div>
</div>
</div>
),
{
fonts: [
{
name: "inconsolata",
data: inconsolataRegularFont,
style: "normal",
weight: 400,
},
],
},
);
};
export default OgImage;
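The route reads everything it needs from the query string, so a request like the following (illustrative, URL-encoded values) renders a 1200x630 card:

// GET /api/experiments/og-image?experimentLabel=My%20Experiment&variantsCount=3&scenariosCount=10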


@@ -22,13 +22,42 @@ import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
import { useAppStore } from "~/state/store";
import { useSyncVariantEditor } from "~/state/sync";
import { HeaderButtons } from "~/components/experiments/HeaderButtons/HeaderButtons";
import Head from "next/head";
// TODO: import less to fix deployment with server side props
// export const getServerSideProps = async (context: GetServerSidePropsContext<{ id: string }>) => {
// const experimentId = context.params?.id as string;
// const helpers = createServerSideHelpers({
// router: appRouter,
// ctx: createInnerTRPCContext({ session: null }),
// transformer: superjson, // optional - adds superjson serialization
// });
// // prefetch query
// await helpers.experiments.stats.prefetch({ id: experimentId });
// return {
// props: {
// trpcState: helpers.dehydrate(),
// },
// };
// };
export default function Experiment() {
const router = useRouter();
const experiment = useExperiment();
const utils = api.useContext();
useSyncVariantEditor();
const experiment = useExperiment();
const experimentStats = api.experiments.stats.useQuery(
{ id: router.query.id as string },
{
enabled: !!router.query.id,
},
);
const stats = experimentStats.data;
useEffect(() => {
useAppStore.getState().sharedVariantEditor.loadMonaco().catch(console.error);
});
@@ -62,53 +91,65 @@ export default function Experiment() {
const canModify = experiment.data?.access.canModify ?? false;
return (
<AppShell title={experiment.data?.label}>
<VStack h="full">
<Flex
px={4}
py={2}
w="full"
direction={{ base: "column", sm: "row" }}
alignItems={{ base: "flex-start", sm: "center" }}
>
<Breadcrumb flex={1}>
<BreadcrumbItem>
<Link href="/experiments">
<Flex alignItems="center" _hover={{ textDecoration: "underline" }}>
<Icon as={RiFlaskLine} boxSize={4} mr={2} /> Experiments
</Flex>
</Link>
</BreadcrumbItem>
<BreadcrumbItem isCurrentPage>
{canModify ? (
<Input
size="sm"
value={label}
onChange={(e) => setLabel(e.target.value)}
onBlur={onSaveLabel}
borderWidth={1}
borderColor="transparent"
fontSize={16}
px={0}
minW={{ base: 100, lg: 300 }}
flex={1}
_hover={{ borderColor: "gray.300" }}
_focus={{ borderColor: "blue.500", outline: "none" }}
/>
) : (
<Text fontSize={16} px={0} minW={{ base: 100, lg: 300 }} flex={1}>
{experiment.data?.label}
</Text>
)}
</BreadcrumbItem>
</Breadcrumb>
<HeaderButtons />
</Flex>
<ExperimentSettingsDrawer />
<Box w="100%" overflowX="auto" flex={1}>
<OutputsTable experimentId={router.query.id as string | undefined} />
</Box>
</VStack>
</AppShell>
<>
{stats && (
<Head>
<meta property="og:title" content={stats.experimentLabel} key="title" />
<meta
property="og:image"
content={`/api/experiments/og-image?experimentLabel=${encodeURIComponent(stats.experimentLabel)}&variantsCount=${stats.promptVariantCount}&scenariosCount=${stats.testScenarioCount}`}
key="og-image"
/>
</Head>
)}
<AppShell title={experiment.data?.label}>
<VStack h="full">
<Flex
px={4}
py={2}
w="full"
direction={{ base: "column", sm: "row" }}
alignItems={{ base: "flex-start", sm: "center" }}
>
<Breadcrumb flex={1}>
<BreadcrumbItem>
<Link href="/experiments">
<Flex alignItems="center" _hover={{ textDecoration: "underline" }}>
<Icon as={RiFlaskLine} boxSize={4} mr={2} /> Experiments
</Flex>
</Link>
</BreadcrumbItem>
<BreadcrumbItem isCurrentPage>
{canModify ? (
<Input
size="sm"
value={label}
onChange={(e) => setLabel(e.target.value)}
onBlur={onSaveLabel}
borderWidth={1}
borderColor="transparent"
fontSize={16}
px={0}
minW={{ base: 100, lg: 300 }}
flex={1}
_hover={{ borderColor: "gray.300" }}
_focus={{ borderColor: "blue.500", outline: "none" }}
/>
) : (
<Text fontSize={16} px={0} minW={{ base: 100, lg: 300 }} flex={1}>
{experiment.data?.label}
</Text>
)}
</BreadcrumbItem>
</Breadcrumb>
<HeaderButtons />
</Flex>
<ExperimentSettingsDrawer />
<Box w="100%" overflowX="auto" flex={1}>
<OutputsTable experimentId={router.query.id as string | undefined} />
</Box>
</VStack>
</AppShell>
</>
);
}


@@ -0,0 +1,15 @@
import { type GetServerSideProps } from "next";
// eslint-disable-next-line @typescript-eslint/require-await
export const getServerSideProps: GetServerSideProps = async () => {
return {
redirect: {
destination: "/world-champs/signup",
permanent: false,
},
};
};
export default function WorldChamps() {
return null;
}


@@ -0,0 +1,201 @@
import {
Box,
type BoxProps,
Button,
DarkMode,
GlobalStyle,
HStack,
Heading,
Icon,
Link,
Table,
Tbody,
Td,
Text,
type TextProps,
Th,
Tr,
VStack,
useInterval,
} from "@chakra-ui/react";
import { signIn, useSession } from "next-auth/react";
import Head from "next/head";
import { useCallback, useState } from "react";
import { BsGithub } from "react-icons/bs";
import UserMenu from "~/components/nav/UserMenu";
import { api } from "~/utils/api";
import dayjs from "~/utils/dayjs";
import { useHandledAsyncCallback } from "~/utils/hooks";
// Shows how long until the competition starts. Refreshes every second
function CountdownTimer(props: { date: Date } & TextProps) {
const [now, setNow] = useState(dayjs(0));
useInterval(() => {
setNow(dayjs());
}, 1000);
const { date, ...rest } = props;
const kickoff = dayjs(date);
const diff = kickoff.diff(now, "second");
const days = Math.floor(diff / 86400);
const hours = Math.floor((diff % 86400) / 3600);
const minutes = Math.floor((diff % 3600) / 60);
const seconds = Math.floor(diff % 60);
return (
<Text {...rest}>
<Text as="span" fontWeight="bold">
Kickoff in
</Text>{" "}
{days}d {hours}h {minutes}m {seconds}s
</Text>
);
}
function ApplicationStatus(props: BoxProps) {
const user = useSession().data;
const entrant = api.worldChamps.userStatus.useQuery().data;
const applyMutation = api.worldChamps.apply.useMutation();
const utils = api.useContext();
const [onSignIn] = useHandledAsyncCallback(async () => {
await signIn("github");
}, []);
const [onApply] = useHandledAsyncCallback(async () => {
await applyMutation.mutateAsync();
await utils.worldChamps.userStatus.invalidate();
}, []);
const Wrapper = useCallback(
(wrapperProps: BoxProps) => (
<Box {...props} {...wrapperProps} minH="120px" alignItems="center" justifyItems="center" />
),
[props],
);
if (user === null) {
return (
<Wrapper>
<Button onClick={onSignIn} colorScheme="orange" leftIcon={<Icon as={BsGithub} />}>
Connect GitHub to apply
</Button>
</Wrapper>
);
} else if (user) {
return (
<Wrapper>
<HStack spacing={8}>
<UserMenu user={user} borderRadius={2} borderColor={"gray.700"} borderWidth={1} pr={6} />
<Box flex={1}>
{entrant?.approved ? (
<Text fontSize="sm">
You're accepted! We'll send you more details before August 14th.
</Text>
) : entrant ? (
<Text fontSize="sm">
Application submitted successfully! We'll notify you by email before August 14th.
</Text>
) : (
<Button onClick={onApply} colorScheme="orange">
Apply to compete
</Button>
)}
</Box>
</HStack>
</Wrapper>
);
}
return <Wrapper />;
}
export default function Signup() {
return (
<DarkMode>
<GlobalStyle />
<Head>
<title>🏆 Prompt Engineering World Championships</title>
<meta property="og:title" content="🏆 Prompt Engineering World Championships" key="title" />
<meta
property="og:description"
content="Think you have what it takes to be the best? Compete with the world's top prompt engineers and see where you rank!"
key="description"
/>
</Head>
<Box bgColor="gray.900" color="gray.200" minH="100vh" w="full">
<VStack mx="auto" py={24} maxW="2xl" align="start" fontSize="lg">
<Heading size="lg">🏆 Prompt Engineering World Championships</Heading>
<CountdownTimer
date={new Date("2023-08-14T00:00:00Z")}
fontSize="2xl"
alignSelf="center"
color="gray.500"
/>
<ApplicationStatus py={8} alignSelf="center" />
<Text fontSize="lg">
Think you have what it takes to be the best? Compete with the world's top prompt
engineers and see where you rank!
</Text>
<Heading size="lg" pt={12} alignSelf="left">
Event Details
</Heading>
<Table variant="simple">
<Tbody>
<Tr>
<Th>Kickoff</Th>
<Td>August 14</Td>
</Tr>
<Tr>
<Th>Prize</Th>
<Td>$15,000 grand prize + smaller category prizes.</Td>
</Tr>
<Tr>
<Th>Events</Th>
<Td>
Optimize prompts for multiple tasks selected from academic benchmarks and
real-world applications.
</Td>
</Tr>
<Tr>
<Th>Models</Th>
<Td>Separate "weight classes" for GPT 3.5, Claude Instant, and Llama 2.</Td>
</Tr>
<Tr>
<Th>Qualifications</Th>
<Td>Open to entrants with any level of experience.</Td>
</Tr>
<Tr>
<Th>Certificates</Th>
<Td>Certificate of mastery for all qualifying participants.</Td>
</Tr>
<Tr>
<Th>Cost</Th>
<Td>
<strong>Free</strong>. We'll cover your inference budget.
</Td>
</Tr>
<Tr>
<Th>Questions?</Th>
<Td>
<Link href="mailto:world-champs@openpipe.ai" textDecor="underline">
Email us
</Link>{" "}
with any follow-up questions!
</Td>
</Tr>
</Tbody>
</Table>
</VStack>
</Box>
</DarkMode>
);
}


@@ -5,6 +5,7 @@ import { scenariosRouter } from "./routers/scenarios.router";
import { scenarioVariantCellsRouter } from "./routers/scenarioVariantCells.router";
import { templateVarsRouter } from "./routers/templateVariables.router";
import { evaluationsRouter } from "./routers/evaluations.router";
import { worldChampsRouter } from "./routers/worldChamps.router";
/**
* This is the primary router for your server.
@@ -18,6 +19,7 @@ export const appRouter = createTRPCRouter({
scenarioVariantCells: scenarioVariantCellsRouter,
templateVars: templateVarsRouter,
evaluations: evaluationsRouter,
worldChamps: worldChampsRouter,
});
// export type definition of API


@@ -2,7 +2,7 @@ import { EvalType } from "@prisma/client";
import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { runAllEvals } from "~/server/utils/evaluations";
import { queueRunNewEval } from "~/server/tasks/runNewEval.task";
import { requireCanModifyExperiment, requireCanViewExperiment } from "~/utils/accessControl";
export const evaluationsRouter = createTRPCRouter({
@@ -40,9 +40,7 @@ export const evaluationsRouter = createTRPCRouter({
},
});
// TODO: this may be a bad UX for slow evals (eg. GPT-4 evals) Maybe need
// to kick off a background job or something instead
await runAllEvals(input.experimentId);
await queueRunNewEval(input.experimentId);
}),
update: protectedProcedure
@@ -78,7 +76,7 @@ export const evaluationsRouter = createTRPCRouter({
});
// Re-run all evals. Other eval results will already be cached, so this
// should only re-run the updated one.
await runAllEvals(evaluation.experimentId);
await queueRunNewEval(experimentId);
}),
delete: protectedProcedure


@@ -15,6 +15,33 @@ import userOrg from "~/server/utils/userOrg";
import generateTypes from "~/modelProviders/generateTypes";
export const experimentsRouter = createTRPCRouter({
stats: publicProcedure.input(z.object({ id: z.string() })).query(async ({ input, ctx }) => {
await requireCanViewExperiment(input.id, ctx);
const [experiment, promptVariantCount, testScenarioCount] = await prisma.$transaction([
prisma.experiment.findFirstOrThrow({
where: { id: input.id },
}),
prisma.promptVariant.count({
where: {
experimentId: input.id,
visible: true,
},
}),
prisma.testScenario.count({
where: {
experimentId: input.id,
visible: true,
},
}),
]);
return {
experimentLabel: experiment.label,
promptVariantCount,
testScenarioCount,
};
}),
list: protectedProcedure.query(async ({ ctx }) => {
// Anyone can list experiments
requireNothing(ctx);
@@ -118,7 +145,7 @@ export const experimentsRouter = createTRPCRouter({
},
},
include: {
modelOutput: {
modelResponses: {
include: {
outputEvaluations: true,
},
@@ -177,11 +204,11 @@ export const experimentsRouter = createTRPCRouter({
}
const cellsToCreate: Prisma.ScenarioVariantCellCreateManyInput[] = [];
const modelOutputsToCreate: Prisma.ModelOutputCreateManyInput[] = [];
const modelResponsesToCreate: Prisma.ModelResponseCreateManyInput[] = [];
const outputEvaluationsToCreate: Prisma.OutputEvaluationCreateManyInput[] = [];
for (const cell of existingCells) {
const newCellId = uuidv4();
const { modelOutput, ...cellData } = cell;
const { modelResponses, ...cellData } = cell;
cellsToCreate.push({
...cellData,
id: newCellId,
@@ -189,20 +216,20 @@ export const experimentsRouter = createTRPCRouter({
testScenarioId: existingToNewScenarioIds.get(cell.testScenarioId) ?? "",
prompt: (cell.prompt as Prisma.InputJsonValue) ?? undefined,
});
if (modelOutput) {
const newModelOutputId = uuidv4();
const { outputEvaluations, ...modelOutputData } = modelOutput;
modelOutputsToCreate.push({
...modelOutputData,
id: newModelOutputId,
for (const modelResponse of modelResponses) {
const newModelResponseId = uuidv4();
const { outputEvaluations, ...modelResponseData } = modelResponse;
modelResponsesToCreate.push({
...modelResponseData,
id: newModelResponseId,
scenarioVariantCellId: newCellId,
output: (modelOutput.output as Prisma.InputJsonValue) ?? undefined,
output: (modelResponse.output as Prisma.InputJsonValue) ?? undefined,
});
for (const evaluation of outputEvaluations) {
outputEvaluationsToCreate.push({
...evaluation,
id: uuidv4(),
modelOutputId: newModelOutputId,
modelResponseId: newModelResponseId,
evaluationId: existingToNewEvaluationIds.get(evaluation.evaluationId) ?? "",
});
}
@@ -245,8 +272,8 @@ export const experimentsRouter = createTRPCRouter({
prisma.scenarioVariantCell.createMany({
data: cellsToCreate,
}),
prisma.modelOutput.createMany({
data: modelOutputsToCreate,
prisma.modelResponse.createMany({
data: modelResponsesToCreate,
}),
prisma.evaluation.createMany({
data: evaluationsToCreate,


@@ -1,6 +1,7 @@
import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { Prisma } from "@prisma/client";
import { generateNewCell } from "~/server/utils/generateNewCell";
import userError from "~/server/utils/error";
import { recordExperimentUpdated } from "~/server/utils/recordExperimentUpdated";
@@ -51,7 +52,9 @@ export const promptVariantsRouter = createTRPCRouter({
id: true,
},
where: {
modelOutput: {
modelResponse: {
outdated: false,
output: { not: Prisma.AnyNull },
scenarioVariantCell: {
promptVariant: {
id: input.variantId,
@@ -93,14 +96,23 @@ export const promptVariantsRouter = createTRPCRouter({
where: {
promptVariantId: input.variantId,
testScenario: { visible: true },
modelOutput: {
is: {},
modelResponses: {
some: {
outdated: false,
output: {
not: Prisma.AnyNull,
},
},
},
},
});
const overallTokens = await prisma.modelOutput.aggregate({
const overallTokens = await prisma.modelResponse.aggregate({
where: {
outdated: false,
output: {
not: Prisma.AnyNull,
},
scenarioVariantCell: {
promptVariantId: input.variantId,
testScenario: {
@@ -118,16 +130,9 @@ export const promptVariantsRouter = createTRPCRouter({
const promptTokens = overallTokens._sum?.promptTokens ?? 0;
const completionTokens = overallTokens._sum?.completionTokens ?? 0;
const awaitingRetrievals = !!(await prisma.scenarioVariantCell.findFirst({
where: {
promptVariantId: input.variantId,
testScenario: { visible: true },
// Check if is PENDING or IN_PROGRESS
retrievalStatus: {
in: ["PENDING", "IN_PROGRESS"],
},
},
}));
const awaitingEvals = !!evalResults.find(
(result) => result.totalCount < scenarioCount * evals.length,
);
return {
evalResults,
@@ -136,7 +141,7 @@ export const promptVariantsRouter = createTRPCRouter({
overallCost: overallTokens._sum?.cost ?? 0,
scenarioCount,
outputCount,
awaitingRetrievals,
awaitingEvals,
};
}),


@@ -19,27 +19,45 @@ export const scenarioVariantCellsRouter = createTRPCRouter({
});
await requireCanViewExperiment(experimentId, ctx);
return await prisma.scenarioVariantCell.findUnique({
where: {
promptVariantId_testScenarioId: {
promptVariantId: input.variantId,
testScenarioId: input.scenarioId,
const [cell, numTotalEvals] = await prisma.$transaction([
prisma.scenarioVariantCell.findUnique({
where: {
promptVariantId_testScenarioId: {
promptVariantId: input.variantId,
testScenarioId: input.scenarioId,
},
},
},
include: {
modelOutput: {
include: {
outputEvaluations: {
include: {
evaluation: {
select: { label: true },
include: {
modelResponses: {
where: {
outdated: false,
},
include: {
outputEvaluations: {
include: {
evaluation: {
select: { label: true },
},
},
},
},
},
},
},
});
}),
prisma.evaluation.count({
where: { experimentId },
}),
]);
if (!cell) return null;
const lastResponse = cell.modelResponses?.[cell.modelResponses?.length - 1];
const evalsComplete = lastResponse?.outputEvaluations?.length === numTotalEvals;
return {
...cell,
evalsComplete,
};
}),
forceRefetch: protectedProcedure
.input(
@@ -62,7 +80,6 @@ export const scenarioVariantCellsRouter = createTRPCRouter({
testScenarioId: input.scenarioId,
},
},
include: { modelOutput: true },
});
if (!cell) {
@@ -70,12 +87,12 @@ export const scenarioVariantCellsRouter = createTRPCRouter({
return;
}
if (cell.modelOutput) {
// TODO: Maybe keep these around to show previous generations?
await prisma.modelOutput.delete({
where: { id: cell.modelOutput.id },
});
}
await prisma.modelResponse.updateMany({
where: { scenarioVariantCellId: cell.id },
data: {
outdated: true,
},
});
await queueQueryModel(cell.id, true);
}),


@@ -41,7 +41,21 @@ export const scenariosRouter = createTRPCRouter({
count,
};
}),
get: protectedProcedure.input(z.object({ id: z.string() })).query(async ({ input, ctx }) => {
const scenario = await prisma.testScenario.findUnique({
where: {
id: input.id,
},
});
if (!scenario) {
throw new Error(`Scenario with id ${input.id} does not exist`);
}
await requireCanViewExperiment(scenario.experimentId, ctx);
return scenario;
}),
create: protectedProcedure
.input(
z.object({


@@ -0,0 +1,36 @@
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { requireNothing } from "~/utils/accessControl";
export const worldChampsRouter = createTRPCRouter({
userStatus: publicProcedure.query(async ({ ctx }) => {
const userId = ctx.session?.user.id;
if (!userId) {
return null;
}
return await prisma.worldChampEntrant.findUnique({
where: { userId },
});
}),
apply: protectedProcedure.mutation(async ({ ctx }) => {
const userId = ctx.session.user.id;
requireNothing(ctx);
const existingEntrant = await prisma.worldChampEntrant.findUnique({
where: { userId },
});
if (existingEntrant) {
return existingEntrant;
}
return await prisma.worldChampEntrant.create({
data: {
userId,
},
});
}),
});


@@ -40,7 +40,7 @@ const noOp = () => {};
*
* @see https://create.t3.gg/en/usage/trpc#-serverapitrpcts
*/
const createInnerTRPCContext = (opts: CreateContextOptions) => {
export const createInnerTRPCContext = (opts: CreateContextOptions) => {
return {
session: opts.session,
prisma,


@@ -0,0 +1,12 @@
#! /bin/bash
set -e
cd "$(dirname "$0")/../../.."
set -o allexport
source .env
set +o allexport
echo "Connecting to prod db"
DATABASE_URL=$PROD_DATABASE_URL pnpm prisma studio


@@ -7,9 +7,9 @@ function defineTask<TPayload>(
taskIdentifier: string,
taskHandler: (payload: TPayload, helpers: Helpers) => Promise<void>,
) {
const enqueue = async (payload: TPayload) => {
const enqueue = async (payload: TPayload, runAt?: Date) => {
console.log("Enqueuing task", taskIdentifier, payload);
await quickAddJob({ connectionString: env.DATABASE_URL }, taskIdentifier, payload);
await quickAddJob({ connectionString: env.DATABASE_URL }, taskIdentifier, payload, { runAt });
};
const handler = (payload: TPayload, helpers: Helpers) => {
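Threading runAt through to graphile-worker's quickAddJob is what allows rate-limited work to be scheduled for the future instead of retried immediately. A hypothetical call, using the queryModel payload shape defined elsewhere in this diff:

// Hypothetical: re-enqueue a rate-limited cell to run ~5 seconds from now.
await queryModel.enqueue(
  { cellId: failedCellId, stream: false, numPreviousTries: 1 },
  new Date(Date.now() + 5000), // the worker will not start the job before this time
);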


@@ -6,15 +6,15 @@ import { wsConnection } from "~/utils/wsConnection";
import { runEvalsForOutput } from "../utils/evaluations";
import hashPrompt from "../utils/hashPrompt";
import parseConstructFn from "../utils/parseConstructFn";
import { sleep } from "../utils/sleep";
import defineTask from "./defineTask";
export type QueryModelJob = {
cellId: string;
stream: boolean;
numPreviousTries: number;
};
const MAX_AUTO_RETRIES = 10;
const MAX_AUTO_RETRIES = 50;
const MIN_DELAY = 500; // milliseconds
const MAX_DELAY = 15000; // milliseconds
@@ -26,20 +26,12 @@ function calculateDelay(numPreviousTries: number): number {
export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) => {
console.log("RUNNING TASK", task);
const { cellId, stream } = task;
const { cellId, stream, numPreviousTries } = task;
const cell = await prisma.scenarioVariantCell.findUnique({
where: { id: cellId },
include: { modelOutput: true },
include: { modelResponses: true },
});
if (!cell) {
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
statusCode: 404,
errorMessage: "Cell not found",
retrievalStatus: "ERROR",
},
});
return;
}
@@ -51,6 +43,7 @@ export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) =
where: { id: cellId },
data: {
retrievalStatus: "IN_PROGRESS",
jobStartedAt: new Date(),
},
});
@@ -61,7 +54,6 @@ export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) =
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
statusCode: 404,
errorMessage: "Prompt Variant not found",
retrievalStatus: "ERROR",
},
@@ -76,7 +68,6 @@ export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) =
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
statusCode: 404,
errorMessage: "Scenario not found",
retrievalStatus: "ERROR",
},
@@ -90,7 +81,6 @@ export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) =
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
statusCode: 400,
errorMessage: prompt.error,
retrievalStatus: "ERROR",
},
@@ -106,58 +96,79 @@ export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) =
}
: null;
for (let i = 0; true; i++) {
const response = await provider.getCompletion(prompt.modelInput, onStream);
if (response.type === "success") {
const inputHash = hashPrompt(prompt);
const inputHash = hashPrompt(prompt);
const modelOutput = await prisma.modelOutput.create({
data: {
scenarioVariantCellId: cellId,
inputHash,
output: response.value as Prisma.InputJsonObject,
timeToComplete: response.timeToComplete,
promptTokens: response.promptTokens,
completionTokens: response.completionTokens,
cost: response.cost,
let modelResponse = await prisma.modelResponse.create({
data: {
inputHash,
scenarioVariantCellId: cellId,
requestedAt: new Date(),
},
});
const response = await provider.getCompletion(prompt.modelInput, onStream);
if (response.type === "success") {
modelResponse = await prisma.modelResponse.update({
where: { id: modelResponse.id },
data: {
output: response.value as Prisma.InputJsonObject,
statusCode: response.statusCode,
receivedAt: new Date(),
promptTokens: response.promptTokens,
completionTokens: response.completionTokens,
cost: response.cost,
},
});
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
retrievalStatus: "COMPLETE",
},
});
await runEvalsForOutput(variant.experimentId, scenario, modelResponse, prompt.modelProvider);
} else {
const shouldRetry = response.autoRetry && numPreviousTries < MAX_AUTO_RETRIES;
const delay = calculateDelay(numPreviousTries);
const retryTime = new Date(Date.now() + delay);
await prisma.modelResponse.update({
where: { id: modelResponse.id },
data: {
statusCode: response.statusCode,
errorMessage: response.message,
receivedAt: new Date(),
retryTime: shouldRetry ? retryTime : null,
},
});
if (shouldRetry) {
await queryModel.enqueue(
{
cellId,
stream,
numPreviousTries: numPreviousTries + 1,
},
});
retryTime,
);
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
statusCode: response.statusCode,
retrievalStatus: "COMPLETE",
retrievalStatus: "PENDING",
},
});
await runEvalsForOutput(variant.experimentId, scenario, modelOutput);
break;
} else {
const shouldRetry = response.autoRetry && i < MAX_AUTO_RETRIES;
const delay = calculateDelay(i);
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
errorMessage: response.message,
statusCode: response.statusCode,
retryTime: shouldRetry ? new Date(Date.now() + delay) : null,
retrievalStatus: "ERROR",
},
});
if (shouldRetry) {
await sleep(delay);
} else {
break;
}
}
}
});
export const queueQueryModel = async (cellId: string, stream: boolean) => {
console.log("queueQueryModel", cellId, stream);
await Promise.all([
prisma.scenarioVariantCell.update({
where: {
@@ -166,10 +177,9 @@ export const queueQueryModel = async (cellId: string, stream: boolean) => {
data: {
retrievalStatus: "PENDING",
errorMessage: null,
jobQueuedAt: new Date(),
},
}),
await queryModel.enqueue({ cellId, stream }),
console.log("queued"),
queryModel.enqueue({ cellId, stream, numPreviousTries: 0 }),
]);
};
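The body of calculateDelay is elided from this diff. A plausible sketch, assuming exponential backoff clamped between MIN_DELAY and MAX_DELAY (the real implementation may differ, e.g. by adding jitter):

// Hypothetical sketch; the actual body is not shown in the hunk above.
function calculateDelay(numPreviousTries: number): number {
  // Double the delay on each retry: 500ms, 1s, 2s, ... capped at 15s.
  const delay = MIN_DELAY * Math.pow(2, numPreviousTries);
  return Math.min(delay, MAX_DELAY);
}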


@@ -0,0 +1,17 @@
import { runAllEvals } from "../utils/evaluations";
import defineTask from "./defineTask";
export type RunNewEvalJob = {
experimentId: string;
};
// When a new eval is created, run it against all existing outputs in the background so the mutation can return the new eval immediately
export const runNewEval = defineTask<RunNewEvalJob>("runNewEval", async (task) => {
console.log("RUNNING TASK", task);
const { experimentId } = task;
await runAllEvals(experimentId);
});
export const queueRunNewEval = async (experimentId: string) => {
await runNewEval.enqueue({ experimentId });
};


@@ -3,10 +3,11 @@ import "dotenv/config";
import { env } from "~/env.mjs";
import { queryModel } from "./queryModel.task";
import { runNewEval } from "./runNewEval.task";
console.log("Starting worker");
const registeredTasks = [queryModel];
const registeredTasks = [queryModel, runNewEval];
const taskList = registeredTasks.reduce((acc, task) => {
acc[task.task.identifier] = task.task.handler;
@@ -16,7 +17,7 @@ const taskList = registeredTasks.reduce((acc, task) => {
// Run a worker to execute jobs:
const runner = await run({
connectionString: env.DATABASE_URL,
concurrency: 20,
concurrency: 50,
// Install signal handlers for graceful shutdown on SIGINT, SIGTERM, etc
noHandleSignals: false,
pollInterval: 1000,


@@ -1,19 +1,25 @@
import { type ModelOutput, type Evaluation } from "@prisma/client";
import { type ModelResponse, type Evaluation, Prisma } from "@prisma/client";
import { prisma } from "../db";
import { runOneEval } from "./runOneEval";
import { type Scenario } from "~/components/OutputsTable/types";
import { type SupportedProvider } from "~/modelProviders/types";
const saveResult = async (evaluation: Evaluation, scenario: Scenario, modelOutput: ModelOutput) => {
const result = await runOneEval(evaluation, scenario, modelOutput);
const runAndSaveEval = async (
evaluation: Evaluation,
scenario: Scenario,
modelResponse: ModelResponse,
provider: SupportedProvider,
) => {
const result = await runOneEval(evaluation, scenario, modelResponse, provider);
return await prisma.outputEvaluation.upsert({
where: {
modelOutputId_evaluationId: {
modelOutputId: modelOutput.id,
modelResponseId_evaluationId: {
modelResponseId: modelResponse.id,
evaluationId: evaluation.id,
},
},
create: {
modelOutputId: modelOutput.id,
modelResponseId: modelResponse.id,
evaluationId: evaluation.id,
...result,
},
@@ -26,20 +32,28 @@ const saveResult = async (evaluation: Evaluation, scenario: Scenario, modelOutpu
export const runEvalsForOutput = async (
experimentId: string,
scenario: Scenario,
modelOutput: ModelOutput,
modelResponse: ModelResponse,
provider: SupportedProvider,
) => {
const evaluations = await prisma.evaluation.findMany({
where: { experimentId },
});
await Promise.all(
evaluations.map(async (evaluation) => await saveResult(evaluation, scenario, modelOutput)),
evaluations.map(
async (evaluation) => await runAndSaveEval(evaluation, scenario, modelResponse, provider),
),
);
};
// Will not run eval-output pairs that already exist in the database
export const runAllEvals = async (experimentId: string) => {
const outputs = await prisma.modelOutput.findMany({
const outputs = await prisma.modelResponse.findMany({
where: {
outdated: false,
output: {
not: Prisma.AnyNull,
},
scenarioVariantCell: {
promptVariant: {
experimentId,
@@ -54,6 +68,7 @@ export const runAllEvals = async (experimentId: string) => {
scenarioVariantCell: {
include: {
testScenario: true,
promptVariant: true,
},
},
outputEvaluations: true,
@@ -65,13 +80,18 @@ export const runAllEvals = async (experimentId: string) => {
await Promise.all(
outputs.map(async (output) => {
const unrunEvals = evals.filter(
const evalsToBeRun = evals.filter(
(evaluation) => !output.outputEvaluations.find((e) => e.evaluationId === evaluation.id),
);
await Promise.all(
unrunEvals.map(async (evaluation) => {
await saveResult(evaluation, output.scenarioVariantCell.testScenario, output);
evalsToBeRun.map(async (evaluation) => {
await runAndSaveEval(
evaluation,
output.scenarioVariantCell.testScenario,
output,
output.scenarioVariantCell.promptVariant.modelProvider as SupportedProvider,
);
}),
);
}),
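The `output: { not: Prisma.AnyNull }` filter is doing real work here: for `Json` columns Prisma distinguishes database `NULL` from JSON `null`, and `Prisma.AnyNull` matches both. A minimal sketch of the semantics (standalone client setup added for illustration):

```ts
import { Prisma, PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Excludes rows where `output` is either database NULL or JSON null,
// keeping only responses that actually contain a payload.
const responsesWithOutput = await prisma.modelResponse.findMany({
  where: { output: { not: Prisma.AnyNull } },
});
```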

View File

@@ -1,4 +1,4 @@
import { type Prisma } from "@prisma/client";
import { Prisma } from "@prisma/client";
import { prisma } from "../db";
import parseConstructFn from "./parseConstructFn";
import { type JsonObject } from "type-fest";
@@ -35,7 +35,7 @@ export const generateNewCell = async (
},
},
include: {
modelOutput: true,
modelResponses: true,
},
});
@@ -51,8 +51,6 @@ export const generateNewCell = async (
data: {
promptVariantId: variantId,
testScenarioId: scenarioId,
statusCode: 400,
errorMessage: parsedConstructFn.error,
retrievalStatus: "ERROR",
},
});
@@ -69,36 +67,55 @@ export const generateNewCell = async (
retrievalStatus: "PENDING",
},
include: {
modelOutput: true,
modelResponses: true,
},
});
const matchingModelOutput = await prisma.modelOutput.findFirst({
where: { inputHash },
const matchingModelResponse = await prisma.modelResponse.findFirst({
where: {
inputHash,
output: {
not: Prisma.AnyNull,
},
},
orderBy: {
receivedAt: "desc",
},
include: {
scenarioVariantCell: true,
},
take: 1,
});
if (matchingModelOutput) {
const newModelOutput = await prisma.modelOutput.create({
if (matchingModelResponse) {
const newModelResponse = await prisma.modelResponse.create({
data: {
...omit(matchingModelOutput, ["id"]),
...omit(matchingModelResponse, ["id", "scenarioVariantCell"]),
scenarioVariantCellId: cell.id,
output: matchingModelOutput.output as Prisma.InputJsonValue,
output: matchingModelResponse.output as Prisma.InputJsonValue,
},
});
await prisma.scenarioVariantCell.update({
where: { id: cell.id },
data: { retrievalStatus: "COMPLETE" },
data: {
retrievalStatus: "COMPLETE",
jobStartedAt: matchingModelResponse.scenarioVariantCell.jobStartedAt,
jobQueuedAt: matchingModelResponse.scenarioVariantCell.jobQueuedAt,
},
});
// Copy over all eval results as well
await Promise.all(
(
await prisma.outputEvaluation.findMany({ where: { modelOutputId: matchingModelOutput.id } })
await prisma.outputEvaluation.findMany({
where: { modelResponseId: matchingModelResponse.id },
})
).map(async (evaluation) => {
await prisma.outputEvaluation.create({
data: {
...omit(evaluation, ["id"]),
modelOutputId: newModelOutput.id,
modelResponseId: newModelResponse.id,
},
});
}),
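The cache lookup keys on `inputHash`, which isn't computed in this file. One plausible derivation — a hypothetical sketch, not the repo's actual helper — is a stable digest of the provider plus the rendered model input:

```ts
import { createHash } from "crypto";

// Hypothetical: hash the provider and rendered input so identical prompts
// across cells resolve to the same cached ModelResponse.
function hashPromptInput(provider: string, modelInput: unknown): string {
  // Caveat: JSON.stringify is only deterministic if key order is stable;
  // a canonical serializer would be safer in practice.
  return createHash("sha256").update(JSON.stringify({ provider, modelInput })).digest("hex");
}
```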

View File

@@ -2,4 +2,5 @@ import { env } from "~/env.mjs";
import OpenAI from "openai";
export const openai = new OpenAI({ apiKey: env.OPENAI_API_KEY });
// Set a dummy key so it doesn't fail at build time
export const openai = new OpenAI({ apiKey: env.OPENAI_API_KEY ?? "dummy-key" });
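Constructing the client at module load means a missing `OPENAI_API_KEY` used to crash `next build`; the dummy default defers the failure to request time. A hypothetical companion guard (not in this diff) would make that failure explicit rather than a confusing 401 from OpenAI:

```ts
import { env } from "~/env.mjs";

// Hypothetical: call before any completion request so a missing key fails
// fast with a clear message instead of surfacing as an auth error.
export function assertRealOpenAIKey(): void {
  if (!env.OPENAI_API_KEY) {
    throw new Error("OPENAI_API_KEY is not set; refusing to call the OpenAI API");
  }
}
```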

View File

@@ -1,13 +1,14 @@
import { type Evaluation, type ModelOutput, type TestScenario } from "@prisma/client";
import { type ChatCompletion } from "openai/resources/chat";
import { type Evaluation, type ModelResponse, type TestScenario } from "@prisma/client";
import { type VariableMap, fillTemplate, escapeRegExp, escapeQuotes } from "./fillTemplate";
import { openai } from "./openai";
import dedent from "dedent";
import modelProviders from "~/modelProviders/modelProviders";
import { type SupportedProvider } from "~/modelProviders/types";
export const runGpt4Eval = async (
evaluation: Evaluation,
scenario: TestScenario,
message: ChatCompletion.Choice.Message,
stringifiedOutput: string,
): Promise<{ result: number; details: string }> => {
const output = await openai.chat.completions.create({
model: "gpt-4-0613",
@@ -26,11 +27,7 @@ export const runGpt4Eval = async (
},
{
role: "user",
content: `The full output of the simpler message:\n---\n${JSON.stringify(
message.content ?? message.function_call,
null,
2,
)}`,
content: `The full output of the simpler message:\n---\n${stringifiedOutput}`,
},
],
function_call: {
@@ -70,15 +67,16 @@ export const runGpt4Eval = async (
export const runOneEval = async (
evaluation: Evaluation,
scenario: TestScenario,
modelOutput: ModelOutput,
modelResponse: ModelResponse,
provider: SupportedProvider,
): Promise<{ result: number; details?: string }> => {
const output = modelOutput.output as unknown as ChatCompletion;
const message = output?.choices?.[0]?.message;
const modelProvider = modelProviders[provider];
const message = modelProvider.normalizeOutput(modelResponse.output);
if (!message) return { result: 0 };
const stringifiedMessage = message.content ?? JSON.stringify(message.function_call);
const stringifiedOutput =
message.type === "json" ? JSON.stringify(message.value, null, 2) : message.value;
const matchRegex = escapeRegExp(
fillTemplate(escapeQuotes(evaluation.value), scenario.variableValues as VariableMap),
@@ -86,10 +84,10 @@ export const runOneEval = async (
switch (evaluation.evalType) {
case "CONTAINS":
return { result: stringifiedMessage.match(matchRegex) !== null ? 1 : 0 };
return { result: stringifiedOutput.match(matchRegex) !== null ? 1 : 0 };
case "DOES_NOT_CONTAIN":
return { result: stringifiedMessage.match(matchRegex) === null ? 1 : 0 };
return { result: stringifiedOutput.match(matchRegex) === null ? 1 : 0 };
case "GPT4_EVAL":
return await runGpt4Eval(evaluation, scenario, message);
return await runGpt4Eval(evaluation, scenario, stringifiedOutput);
}
};
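The branches above imply that `normalizeOutput` returns a small discriminated union rather than a raw `ChatCompletion` message. Inferred shape (an assumption based on this diff, not the provider module itself):

```ts
// Inferred: providers normalize their raw responses into text or JSON.
type NormalizedOutput =
  | { type: "text"; value: string }
  | { type: "json"; value: unknown };

const stringifyNormalized = (message: NormalizedOutput): string =>
  message.type === "json" ? JSON.stringify(message.value, null, 2) : message.value;
```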

View File

@@ -7,3 +7,5 @@ dayjs.extend(relativeTime);
export const formatTimePast = (date: Date) =>
dayjs.duration(dayjs(date).diff(dayjs())).humanize(true);
export default dayjs;
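`formatTimePast` humanizes the signed gap between `date` and now. Assuming the `duration` plugin is extended alongside `relativeTime` in the full file, it behaves like this:

```ts
formatTimePast(new Date(Date.now() - 90_000)); // => "2 minutes ago"
formatTimePast(new Date(Date.now() - 3_600_000)); // => "an hour ago"
```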

View File

@@ -107,4 +107,8 @@ export const useScenarios = () => {
);
};
export const useScenario = (scenarioId: string) => {
return api.scenarios.get.useQuery({ id: scenarioId });
};
export const useVisibleScenarioIds = () => useScenarios().data?.scenarios.map((s) => s.id) ?? [];
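A hypothetical usage of the two new hooks in a component (the import path is assumed):

```tsx
import { useScenario, useVisibleScenarioIds } from "~/utils/hooks";

const ScenarioPosition = ({ scenarioId }: { scenarioId: string }) => {
  const scenario = useScenario(scenarioId); // tRPC query result
  const visibleIds = useVisibleScenarioIds();

  if (!scenario.data) return null;
  // Render this scenario's 1-based position among the visible scenarios.
  return <span>Scenario {visibleIds.indexOf(scenarioId) + 1}</span>;
};
```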