format with prettier 3

Kyle Corbitt
2023-07-08 22:12:47 -07:00
parent 6b32619e87
commit a8db6cadfd
34 changed files with 201 additions and 480 deletions

.prettierignore (new file, +1)
View File

@@ -0,0 +1 @@
src/codegen/openai.schema.json

.prettierrc (new file, +1)
View File

@@ -0,0 +1 @@
{ "printWidth": 100 }

View File

@@ -1,5 +1,4 @@
# <img src="https://github.com/openpipe/openpipe/assets/41524992/3fec1f7f-f55d-43e9-bfb9-fa709a618b49" width="36" height="36"> OpenPipe
OpenPipe is a flexible playground for comparing and optimizing LLM prompts. It lets you quickly generate, test and compare candidate prompts with realistic sample data.
@@ -16,7 +15,7 @@ Set up multiple prompt configurations and compare their output side-by-side. Eac
Inspect prompt completions side-by-side.
**Test Many Inputs**
OpenPipe lets you *template* a prompt. Use the templating feature to run the prompts you're testing against many potential inputs for broader coverage of your problem space than you'd get with manual testing.
OpenPipe lets you _template_ a prompt. Use the templating feature to run the prompts you're testing against many potential inputs for broader coverage of your problem space than you'd get with manual testing.
**🪄 Auto-generate Test Scenarios**
OpenPipe includes a tool to generate new test scenarios based on your existing prompts and scenarios. Just click "Autogenerate Scenario" to try it out!
@@ -32,6 +31,7 @@ Natively supports [OpenAI function calls](https://openai.com/blog/function-calli
<img height="300px" alt="function calls" src="https://github.com/openpipe/openpipe/assets/176426/48ad13fe-af2f-4294-bf32-62015597fd9b">
## Supported Models
OpenPipe currently supports GPT-3.5 and GPT-4. Wider model support is planned.
## Running Locally

View File

@@ -74,6 +74,7 @@
"eslint": "^8.40.0",
"eslint-config-next": "^13.4.2",
"eslint-plugin-unused-imports": "^2.0.0",
"prettier": "^3.0.0",
"prisma": "^4.14.0",
"typescript": "^5.0.4",
"yaml": "^2.3.1"
@@ -83,8 +84,5 @@
},
"prisma": {
"seed": "tsx prisma/seed.ts"
},
"prettier": {
"printWidth": 100
}
}
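Alongside the `prettier` devDependency bump to `^3.0.0`, the second hunk removes the inline `"prettier": { "printWidth": 100 }` block; that setting now lives in the new `.prettierrc` above, where editors and API consumers resolve it identically. A sketch of that resolution path using Prettier 3's async API (the helper itself is illustrative, not part of the repo):

```ts
import prettier from "prettier";

// Format a source string with whatever .prettierrc applies to `filepath`.
// In Prettier 3, resolveConfig() and format() both return promises.
async function formatWithProjectConfig(source: string, filepath: string): Promise<string> {
  const config = await prettier.resolveConfig(filepath); // finds .prettierrc, null if none
  return prettier.format(source, { ...(config ?? {}), filepath }); // filepath selects the parser
}
```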

View File

@@ -1,19 +1,19 @@
{
"name": "",
"short_name": "",
"icons": [
{
"src": "/favicons/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/favicons/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
],
"theme_color": "#ffffff",
"background_color": "#ffffff",
"display": "standalone"
"name": "",
"short_name": "",
"icons": [
{
"src": "/favicons/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/favicons/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
],
"theme_color": "#ffffff",
"background_color": "#ffffff",
"display": "standalone"
}

View File

@@ -39,7 +39,7 @@ const convertOpenApiToJsonSchema = async (url: string) => {
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "openai.schema.json"),
JSON.stringify(jsonSchema, null, 2)
JSON.stringify(jsonSchema, null, 2),
);
};

View File

@@ -20,9 +20,7 @@
"/chat/completions": {
"post": {
"operationId": "createChatCompletion",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates a model response for the given chat conversation.",
"requestBody": {
"required": true,
@@ -64,9 +62,7 @@
"/completions": {
"post": {
"operationId": "createCompletion",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates a completion for the provided prompt and parameters.",
"requestBody": {
"required": true,
@@ -107,9 +103,7 @@
"/edits": {
"post": {
"operationId": "createEdit",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates a new edit for the provided input, instruction, and parameters.",
"requestBody": {
"required": true,
@@ -150,9 +144,7 @@
"/images/generations": {
"post": {
"operationId": "createImage",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates an image given a prompt.",
"requestBody": {
"required": true,
@@ -194,9 +186,7 @@
"/images/edits": {
"post": {
"operationId": "createImageEdit",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates an edited or extended image given an original image and a prompt.",
"requestBody": {
"required": true,
@@ -237,9 +227,7 @@
"/images/variations": {
"post": {
"operationId": "createImageVariation",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates a variation of a given image.",
"requestBody": {
"required": true,
@@ -280,9 +268,7 @@
"/embeddings": {
"post": {
"operationId": "createEmbedding",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates an embedding vector representing the input text.",
"requestBody": {
"required": true,
@@ -323,9 +309,7 @@
"/audio/transcriptions": {
"post": {
"operationId": "createTranscription",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Transcribes audio into the input language.",
"requestBody": {
"required": true,
@@ -367,9 +351,7 @@
"/audio/translations": {
"post": {
"operationId": "createTranslation",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Translates audio into English.",
"requestBody": {
"required": true,
@@ -411,9 +393,7 @@
"/files": {
"get": {
"operationId": "listFiles",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Returns a list of files that belong to the user's organization.",
"responses": {
"200": {
@@ -441,9 +421,7 @@
},
"post": {
"operationId": "createFile",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.\n",
"requestBody": {
"required": true,
@@ -483,9 +461,7 @@
"/files/{file_id}": {
"delete": {
"operationId": "deleteFile",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Delete a file.",
"parameters": [
{
@@ -524,9 +500,7 @@
},
"get": {
"operationId": "retrieveFile",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Returns information about a specific file.",
"parameters": [
{
@@ -567,9 +541,7 @@
"/files/{file_id}/content": {
"get": {
"operationId": "downloadFile",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Returns the contents of the specified file",
"parameters": [
{
@@ -609,9 +581,7 @@
"/fine-tunes": {
"post": {
"operationId": "createFineTune",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Creates a job that fine-tunes a specified model from a given dataset.\n\nResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\n\n[Learn more about Fine-tuning](/docs/guides/fine-tuning)\n",
"requestBody": {
"required": true,
@@ -649,9 +619,7 @@
},
"get": {
"operationId": "listFineTunes",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "List your organization's fine-tuning jobs\n",
"responses": {
"200": {
@@ -681,9 +649,7 @@
"/fine-tunes/{fine_tune_id}": {
"get": {
"operationId": "retrieveFineTune",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Gets info about the fine-tune job.\n\n[Learn more about Fine-tuning](/docs/guides/fine-tuning)\n",
"parameters": [
{
@@ -725,9 +691,7 @@
"/fine-tunes/{fine_tune_id}/cancel": {
"post": {
"operationId": "cancelFineTune",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Immediately cancel a fine-tune job.\n",
"parameters": [
{
@@ -769,9 +733,7 @@
"/fine-tunes/{fine_tune_id}/events": {
"get": {
"operationId": "listFineTuneEvents",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Get fine-grained status updates for a fine-tune job.\n",
"parameters": [
{
@@ -823,9 +785,7 @@
"/models": {
"get": {
"operationId": "listModels",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Lists the currently available models, and provides basic information about each one such as the owner and availability.",
"responses": {
"200": {
@@ -855,9 +815,7 @@
"/models/{model}": {
"get": {
"operationId": "retrieveModel",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Retrieves a model instance, providing basic information about the model such as the owner and permissioning.",
"parameters": [
{
@@ -897,9 +855,7 @@
},
"delete": {
"operationId": "deleteModel",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Delete a fine-tuned model. You must have the Owner role in your organization.",
"parameters": [
{
@@ -941,9 +897,7 @@
"/moderations": {
"post": {
"operationId": "createModeration",
"tags": [
"OpenAI"
],
"tags": ["OpenAI"],
"summary": "Classifies if text violates OpenAI's Content Policy",
"requestBody": {
"required": true,
@@ -1004,12 +958,7 @@
"nullable": true
}
},
"required": [
"type",
"message",
"param",
"code"
]
"required": ["type", "message", "param", "code"]
},
"ErrorResponse": {
"type": "object",
@@ -1018,9 +967,7 @@
"$ref": "#/components/schemas/Error"
}
},
"required": [
"error"
]
"required": ["error"]
},
"ListModelsResponse": {
"type": "object",
@@ -1035,10 +982,7 @@
}
}
},
"required": [
"object",
"data"
]
"required": ["object", "data"]
},
"DeleteModelResponse": {
"type": "object",
@@ -1053,11 +997,7 @@
"type": "boolean"
}
},
"required": [
"id",
"object",
"deleted"
]
"required": ["id", "object", "deleted"]
},
"CreateCompletionRequest": {
"type": "object",
@@ -1243,10 +1183,7 @@
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": [
"model",
"prompt"
]
"required": ["model", "prompt"]
},
"CreateCompletionResponse": {
"type": "object",
@@ -1267,12 +1204,7 @@
"type": "array",
"items": {
"type": "object",
"required": [
"text",
"index",
"logprobs",
"finish_reason"
],
"required": ["text", "index", "logprobs", "finish_reason"],
"properties": {
"text": {
"type": "string"
@@ -1312,10 +1244,7 @@
},
"finish_reason": {
"type": "string",
"enum": [
"stop",
"length"
]
"enum": ["stop", "length"]
}
}
}
@@ -1333,32 +1262,17 @@
"type": "integer"
}
},
"required": [
"prompt_tokens",
"completion_tokens",
"total_tokens"
]
"required": ["prompt_tokens", "completion_tokens", "total_tokens"]
}
},
"required": [
"id",
"object",
"created",
"model",
"choices"
]
"required": ["id", "object", "created", "model", "choices"]
},
"ChatCompletionRequestMessage": {
"type": "object",
"properties": {
"role": {
"type": "string",
"enum": [
"system",
"user",
"assistant",
"function"
],
"enum": ["system", "user", "assistant", "function"],
"description": "The role of the messages author. One of `system`, `user`, `assistant`, or `function`."
},
"content": {
@@ -1384,9 +1298,7 @@
}
}
},
"required": [
"role"
]
"required": ["role"]
},
"ChatCompletionFunctionParameters": {
"type": "object",
@@ -1408,21 +1320,14 @@
"$ref": "#/components/schemas/ChatCompletionFunctionParameters"
}
},
"required": [
"name"
]
"required": ["name"]
},
"ChatCompletionResponseMessage": {
"type": "object",
"properties": {
"role": {
"type": "string",
"enum": [
"system",
"user",
"assistant",
"function"
],
"enum": ["system", "user", "assistant", "function"],
"description": "The role of the author of this message."
},
"content": {
@@ -1445,21 +1350,14 @@
}
}
},
"required": [
"role"
]
"required": ["role"]
},
"ChatCompletionStreamResponseDelta": {
"type": "object",
"properties": {
"role": {
"type": "string",
"enum": [
"system",
"user",
"assistant",
"function"
],
"enum": ["system", "user", "assistant", "function"],
"description": "The role of the author of this message."
},
"content": {
@@ -1522,10 +1420,7 @@
"oneOf": [
{
"type": "string",
"enum": [
"none",
"auto"
]
"enum": ["none", "auto"]
},
{
"type": "object",
@@ -1535,9 +1430,7 @@
"description": "The name of the function to call."
}
},
"required": [
"name"
]
"required": ["name"]
}
]
},
@@ -1626,10 +1519,7 @@
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": [
"model",
"messages"
]
"required": ["model", "messages"]
},
"CreateChatCompletionResponse": {
"type": "object",
@@ -1659,11 +1549,7 @@
},
"finish_reason": {
"type": "string",
"enum": [
"stop",
"length",
"function_call"
]
"enum": ["stop", "length", "function_call"]
}
}
}
@@ -1681,20 +1567,10 @@
"type": "integer"
}
},
"required": [
"prompt_tokens",
"completion_tokens",
"total_tokens"
]
"required": ["prompt_tokens", "completion_tokens", "total_tokens"]
}
},
"required": [
"id",
"object",
"created",
"model",
"choices"
]
"required": ["id", "object", "created", "model", "choices"]
},
"CreateChatCompletionStreamResponse": {
"type": "object",
@@ -1724,23 +1600,13 @@
},
"finish_reason": {
"type": "string",
"enum": [
"stop",
"length",
"function_call"
]
"enum": ["stop", "length", "function_call"]
}
}
}
}
},
"required": [
"id",
"object",
"created",
"model",
"choices"
]
"required": ["id", "object", "created", "model", "choices"]
},
"CreateEditRequest": {
"type": "object",
@@ -1755,10 +1621,7 @@
},
{
"type": "string",
"enum": [
"text-davinci-edit-001",
"code-davinci-edit-001"
]
"enum": ["text-davinci-edit-001", "code-davinci-edit-001"]
}
]
},
@@ -1802,10 +1665,7 @@
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n"
}
},
"required": [
"model",
"instruction"
]
"required": ["model", "instruction"]
},
"CreateEditResponse": {
"type": "object",
@@ -1859,10 +1719,7 @@
},
"finish_reason": {
"type": "string",
"enum": [
"stop",
"length"
]
"enum": ["stop", "length"]
}
}
}
@@ -1880,19 +1737,10 @@
"type": "integer"
}
},
"required": [
"prompt_tokens",
"completion_tokens",
"total_tokens"
]
"required": ["prompt_tokens", "completion_tokens", "total_tokens"]
}
},
"required": [
"object",
"created",
"choices",
"usage"
]
"required": ["object", "created", "choices", "usage"]
},
"CreateImageRequest": {
"type": "object",
@@ -1913,11 +1761,7 @@
},
"size": {
"type": "string",
"enum": [
"256x256",
"512x512",
"1024x1024"
],
"enum": ["256x256", "512x512", "1024x1024"],
"default": "1024x1024",
"example": "1024x1024",
"nullable": true,
@@ -1925,10 +1769,7 @@
},
"response_format": {
"type": "string",
"enum": [
"url",
"b64_json"
],
"enum": ["url", "b64_json"],
"default": "url",
"example": "url",
"nullable": true,
@@ -1940,9 +1781,7 @@
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": [
"prompt"
]
"required": ["prompt"]
},
"ImagesResponse": {
"properties": {
@@ -1964,10 +1803,7 @@
}
}
},
"required": [
"created",
"data"
]
"required": ["created", "data"]
},
"CreateImageEditRequest": {
"type": "object",
@@ -1998,11 +1834,7 @@
},
"size": {
"type": "string",
"enum": [
"256x256",
"512x512",
"1024x1024"
],
"enum": ["256x256", "512x512", "1024x1024"],
"default": "1024x1024",
"example": "1024x1024",
"nullable": true,
@@ -2010,10 +1842,7 @@
},
"response_format": {
"type": "string",
"enum": [
"url",
"b64_json"
],
"enum": ["url", "b64_json"],
"default": "url",
"example": "url",
"nullable": true,
@@ -2025,10 +1854,7 @@
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": [
"prompt",
"image"
]
"required": ["prompt", "image"]
},
"CreateImageVariationRequest": {
"type": "object",
@@ -2049,11 +1875,7 @@
},
"size": {
"type": "string",
"enum": [
"256x256",
"512x512",
"1024x1024"
],
"enum": ["256x256", "512x512", "1024x1024"],
"default": "1024x1024",
"example": "1024x1024",
"nullable": true,
@@ -2061,10 +1883,7 @@
},
"response_format": {
"type": "string",
"enum": [
"url",
"b64_json"
],
"enum": ["url", "b64_json"],
"default": "url",
"example": "url",
"nullable": true,
@@ -2076,9 +1895,7 @@
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": [
"image"
]
"required": ["image"]
},
"CreateModerationRequest": {
"type": "object",
@@ -2112,17 +1929,12 @@
},
{
"type": "string",
"enum": [
"text-moderation-latest",
"text-moderation-stable"
]
"enum": ["text-moderation-latest", "text-moderation-stable"]
}
]
}
},
"required": [
"input"
]
"required": ["input"]
},
"CreateModerationResponse": {
"type": "object",
@@ -2212,19 +2024,11 @@
]
}
},
"required": [
"flagged",
"categories",
"category_scores"
]
"required": ["flagged", "categories", "category_scores"]
}
}
},
"required": [
"id",
"model",
"results"
]
"required": ["id", "model", "results"]
},
"ListFilesResponse": {
"type": "object",
@@ -2239,10 +2043,7 @@
}
}
},
"required": [
"object",
"data"
]
"required": ["object", "data"]
},
"CreateFileRequest": {
"type": "object",
@@ -2258,10 +2059,7 @@
"type": "string"
}
},
"required": [
"file",
"purpose"
]
"required": ["file", "purpose"]
},
"DeleteFileResponse": {
"type": "object",
@@ -2276,11 +2074,7 @@
"type": "boolean"
}
},
"required": [
"id",
"object",
"deleted"
]
"required": ["id", "object", "deleted"]
},
"CreateFineTuneRequest": {
"type": "object",
@@ -2307,12 +2101,7 @@
},
{
"type": "string",
"enum": [
"ada",
"babbage",
"curie",
"davinci"
]
"enum": ["ada", "babbage", "curie", "davinci"]
}
]
},
@@ -2364,12 +2153,7 @@
"items": {
"type": "number"
},
"example": [
0.6,
1,
1.5,
2
],
"example": [0.6, 1, 1.5, 2],
"default": null,
"nullable": true
},
@@ -2382,9 +2166,7 @@
"nullable": true
}
},
"required": [
"training_file"
]
"required": ["training_file"]
},
"ListFineTunesResponse": {
"type": "object",
@@ -2399,10 +2181,7 @@
}
}
},
"required": [
"object",
"data"
]
"required": ["object", "data"]
},
"ListFineTuneEventsResponse": {
"type": "object",
@@ -2417,10 +2196,7 @@
}
}
},
"required": [
"object",
"data"
]
"required": ["object", "data"]
},
"CreateEmbeddingRequest": {
"type": "object",
@@ -2435,9 +2211,7 @@
},
{
"type": "string",
"enum": [
"text-embedding-ada-002"
]
"enum": ["text-embedding-ada-002"]
}
]
},
@@ -2486,10 +2260,7 @@
"description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n"
}
},
"required": [
"model",
"input"
]
"required": ["model", "input"]
},
"CreateEmbeddingResponse": {
"type": "object",
@@ -2518,11 +2289,7 @@
}
}
},
"required": [
"index",
"object",
"embedding"
]
"required": ["index", "object", "embedding"]
}
},
"usage": {
@@ -2535,18 +2302,10 @@
"type": "integer"
}
},
"required": [
"prompt_tokens",
"total_tokens"
]
"required": ["prompt_tokens", "total_tokens"]
}
},
"required": [
"object",
"model",
"data",
"usage"
]
"required": ["object", "model", "data", "usage"]
},
"CreateTranscriptionRequest": {
"type": "object",
@@ -2567,9 +2326,7 @@
},
{
"type": "string",
"enum": [
"whisper-1"
]
"enum": ["whisper-1"]
}
]
},
@@ -2592,10 +2349,7 @@
"type": "string"
}
},
"required": [
"file",
"model"
]
"required": ["file", "model"]
},
"CreateTranscriptionResponse": {
"type": "object",
@@ -2604,9 +2358,7 @@
"type": "string"
}
},
"required": [
"text"
]
"required": ["text"]
},
"CreateTranslationRequest": {
"type": "object",
@@ -2627,9 +2379,7 @@
},
{
"type": "string",
"enum": [
"whisper-1"
]
"enum": ["whisper-1"]
}
]
},
@@ -2648,10 +2398,7 @@
"default": 0
}
},
"required": [
"file",
"model"
]
"required": ["file", "model"]
},
"CreateTranslationResponse": {
"type": "object",
@@ -2660,9 +2407,7 @@
"type": "string"
}
},
"required": [
"text"
]
"required": ["text"]
},
"Model": {
"title": "Model",
@@ -2680,12 +2425,7 @@
"type": "string"
}
},
"required": [
"id",
"object",
"created",
"owned_by"
]
"required": ["id", "object", "created", "owned_by"]
},
"OpenAIFile": {
"title": "OpenAIFile",
@@ -2716,14 +2456,7 @@
"nullable": true
}
},
"required": [
"id",
"object",
"bytes",
"created_at",
"filename",
"purpose"
]
"required": ["id", "object", "bytes", "created_at", "filename", "purpose"]
},
"FineTune": {
"title": "FineTune",
@@ -2812,12 +2545,7 @@
"type": "string"
}
},
"required": [
"object",
"created_at",
"level",
"message"
]
"required": ["object", "created_at", "level", "message"]
}
}
},
@@ -2876,4 +2604,4 @@
]
},
"$schema": "http://json-schema.org/draft-04/schema#"
}

View File

@@ -189,7 +189,7 @@ export default function EditEvaluations() {
<Icon as={BsX} boxSize={6} />
</Button>
</HStack>
)
),
)}
{editingId == null && (
<Button

View File

@@ -1,12 +1,6 @@
import { type RouterOutputs, api } from "~/utils/api";
import { type PromptVariant, type Scenario } from "../types";
import {
Spinner,
Text,
Box,
Center,
Flex,
} from "@chakra-ui/react";
import { Spinner, Text, Box, Center, Flex } from "@chakra-ui/react";
import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
import SyntaxHighlighter from "react-syntax-highlighter";
import { docco } from "react-syntax-highlighter/dist/cjs/styles/hljs";
@@ -82,7 +76,7 @@ export default function OutputCell({
await utils.promptVariants.stats.invalidate();
fetchMutex.current = false;
},
[outputMutation, scenario.id, variant.id]
[outputMutation, scenario.id, variant.id],
);
const hardRefetch = useCallback(() => fetchOutput(true), [fetchOutput]);
@@ -143,7 +137,7 @@ export default function OutputCell({
function: message.function_call.name,
args: parsedArgs,
},
{ maxLength: 40 }
{ maxLength: 40 },
)}
</SyntaxHighlighter>
<OutputStats model={model} modelOutput={output} scenario={scenario} />
@@ -160,5 +154,3 @@ export default function OutputCell({
</Flex>
);
}

View File

@@ -64,7 +64,7 @@ export default function ScenarioEditor({
});
await utils.scenarios.list.invalidate();
},
[reorderMutation, scenario.id]
[reorderMutation, scenario.id],
);
return (

View File

@@ -17,7 +17,7 @@ export default function VariantConfigEditor(props: { variant: PromptVariant }) {
const savedConfig = useMemo(
() => JSON.stringify(props.variant.config, null, 2),
[props.variant.config]
[props.variant.config],
);
const savedConfigRef = useRef(savedConfig);

View File

@@ -46,7 +46,7 @@ export default function VariantHeader(props: { variant: PromptVariant }) {
});
await utils.promptVariants.list.invalidate();
},
[reorderMutation, props.variant.id]
[reorderMutation, props.variant.id],
);
return (

View File

@@ -20,7 +20,7 @@ export default function VariantStats(props: { variant: PromptVariant }) {
scenarioCount: 0,
outputCount: 0,
},
}
},
);
const [passColor, neutralColor, failColor] = useToken("colors", [

View File

@@ -20,13 +20,13 @@ const stickyHeaderStyle: SystemStyleObject = {
export default function OutputsTable({ experimentId }: { experimentId: string | undefined }) {
const variants = api.promptVariants.list.useQuery(
{ experimentId: experimentId as string },
{ enabled: !!experimentId }
{ enabled: !!experimentId },
);
const openDrawer = useStore((s) => s.openDrawer);
const scenarios = api.scenarios.list.useQuery(
{ experimentId: experimentId as string },
{ enabled: !!experimentId }
{ enabled: !!experimentId },
);
if (!variants.data || !scenarios.data) return null;

View File

@@ -30,9 +30,7 @@ export default function ExperimentsPage() {
<NewExperimentButton mr={4} borderRadius={8} />
</HStack>
<SimpleGrid w="full" columns={{ base: 1, md: 2, lg: 3, xl: 4 }} spacing={8} p="4">
{experiments?.data?.map((exp) => (
<ExperimentCard key={exp.id} exp={exp} />
))}
{experiments?.data?.map((exp) => <ExperimentCard key={exp.id} exp={exp} />)}
</SimpleGrid>
</VStack>
</AppShell>

View File

@@ -1,14 +1,14 @@
import { type GetServerSideProps } from 'next';
import { type GetServerSideProps } from "next";
// eslint-disable-next-line @typescript-eslint/require-await
export const getServerSideProps: GetServerSideProps = async (context) => {
return {
redirect: {
destination: '/experiments',
destination: "/experiments",
permanent: false,
},
}
}
};
};
export default function Home() {
return null;

View File

@@ -1,4 +1,3 @@
import { type CompletionCreateParams } from "openai/resources/chat";
import { prisma } from "../db";
import { openai } from "../utils/openai";
@@ -27,7 +26,7 @@ function isAxiosError(error: unknown): error is AxiosError {
return false;
}
export const autogenerateScenarioValues = async (
experimentId: string
experimentId: string,
): Promise<Record<string, string>> => {
const [experiment, variables, existingScenarios, prompt] = await Promise.all([
prisma.experiment.findUnique({
@@ -84,8 +83,8 @@ export const autogenerateScenarioValues = async (
(scenario) =>
pick(
scenario.variableValues,
variables.map((variable) => variable.label)
) as Record<string, string>
variables.map((variable) => variable.label),
) as Record<string, string>,
)
.filter((vals) => Object.keys(vals ?? {}).length > 0)
.forEach((vals) => {
@@ -99,10 +98,13 @@ export const autogenerateScenarioValues = async (
});
});
const variableProperties = variables.reduce((acc, variable) => {
acc[variable.label] = { type: "string" };
return acc;
}, {} as Record<string, { type: "string" }>);
const variableProperties = variables.reduce(
(acc, variable) => {
acc[variable.label] = { type: "string" };
return acc;
},
{} as Record<string, { type: "string" }>,
);
try {
const completion = await openai.chat.completions.create({
@@ -123,7 +125,7 @@ export const autogenerateScenarioValues = async (
});
const parsed = JSON.parse(
completion.choices[0]?.message?.function_call?.arguments ?? "{}"
completion.choices[0]?.message?.function_call?.arguments ?? "{}",
) as Record<string, string>;
return parsed;
} catch (e) {
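The `reduce` rewrite in this hunk is one of the few changes in the commit that goes beyond a comma or a quote: when a call's final argument is an object literal with an `as` cast, Prettier 3 expands the call so each argument gets its own line (the same shape shows up again in `fillTemplateJson` below). A standalone example of the printed form:

```ts
// How Prettier 3 prints a reduce whose seed value carries a type cast:
// the call breaks, one argument per line, trailing commas included.
const tally = ["a", "b", "a"].reduce(
  (acc, key) => {
    acc[key] = (acc[key] ?? 0) + 1;
    return acc;
  },
  {} as Record<string, number>,
);
```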

View File

@@ -21,7 +21,7 @@ export const evaluationsRouter = createTRPCRouter({
name: z.string(),
matchString: z.string(),
matchType: z.nativeEnum(EvaluationMatchType),
})
}),
)
.mutation(async ({ input }) => {
const evaluation = await prisma.evaluation.create({
@@ -44,7 +44,7 @@ export const evaluationsRouter = createTRPCRouter({
matchString: z.string().optional(),
matchType: z.nativeEnum(EvaluationMatchType).optional(),
}),
})
}),
)
.mutation(async ({ input }) => {
await prisma.evaluation.update({
@@ -56,7 +56,7 @@ export const evaluationsRouter = createTRPCRouter({
},
});
await reevaluateEvaluation(
await prisma.evaluation.findUniqueOrThrow({ where: { id: input.id } })
await prisma.evaluation.findUniqueOrThrow({ where: { id: input.id } }),
);
}),

View File

@@ -32,7 +32,7 @@ export const experimentsRouter = createTRPCRouter({
testScenarioCount: visibleTestScenarioCount,
promptVariantCount: visiblePromptVariantCount,
};
})
}),
);
return experimentsWithCounts;

View File

@@ -16,7 +16,7 @@ export const modelOutputsRouter = createTRPCRouter({
variantId: z.string(),
channel: z.string().optional(),
forceRefetch: z.boolean().optional(),
})
}),
)
.mutation(async ({ input }) => {
const existing = await prisma.modelOutput.findUnique({
@@ -46,7 +46,7 @@ export const modelOutputsRouter = createTRPCRouter({
const filledTemplate = fillTemplateJson(
variant.config as JSONSerializable,
scenario.variableValues as VariableMap
scenario.variableValues as VariableMap,
);
const inputHash = crypto

View File

@@ -73,7 +73,7 @@ export const promptVariantsRouter = createTRPCRouter({
.input(
z.object({
experimentId: z.string(),
})
}),
)
.mutation(async ({ input }) => {
const lastVariant = await prisma.promptVariant.findFirst({
@@ -126,7 +126,7 @@ export const promptVariantsRouter = createTRPCRouter({
updates: z.object({
label: z.string().optional(),
}),
})
}),
)
.mutation(async ({ input }) => {
const existing = await prisma.promptVariant.findUnique({
@@ -151,7 +151,7 @@ export const promptVariantsRouter = createTRPCRouter({
.input(
z.object({
id: z.string(),
})
}),
)
.mutation(async ({ input }) => {
return await prisma.promptVariant.update({
@@ -165,7 +165,7 @@ export const promptVariantsRouter = createTRPCRouter({
z.object({
id: z.string(),
config: z.string(),
})
}),
)
.mutation(async ({ input }) => {
const existing = await prisma.promptVariant.findUnique({
@@ -217,7 +217,7 @@ export const promptVariantsRouter = createTRPCRouter({
z.object({
draggedId: z.string(),
droppedId: z.string(),
})
}),
)
.mutation(async ({ input }) => {
const dragged = await prisma.promptVariant.findUnique({
@@ -234,7 +234,7 @@ export const promptVariantsRouter = createTRPCRouter({
if (!dragged || !dropped || dragged.experimentId !== dropped.experimentId) {
throw new Error(
`Prompt Variant with id ${input.draggedId} or ${input.droppedId} does not exist`
`Prompt Variant with id ${input.draggedId} or ${input.droppedId} does not exist`,
);
}
@@ -277,7 +277,7 @@ export const promptVariantsRouter = createTRPCRouter({
sortIndex: index,
},
});
})
}),
);
}),
});

View File

@@ -21,7 +21,7 @@ export const scenariosRouter = createTRPCRouter({
z.object({
experimentId: z.string(),
autogenerate: z.boolean().optional(),
})
}),
)
.mutation(async ({ input }) => {
const maxSortIndex =
@@ -68,7 +68,7 @@ export const scenariosRouter = createTRPCRouter({
z.object({
draggedId: z.string(),
droppedId: z.string(),
})
}),
)
.mutation(async ({ input }) => {
const dragged = await prisma.testScenario.findUnique({
@@ -85,7 +85,7 @@ export const scenariosRouter = createTRPCRouter({
if (!dragged || !dropped || dragged.experimentId !== dropped.experimentId) {
throw new Error(
`Prompt Variant with id ${input.draggedId} or ${input.droppedId} does not exist`
`Prompt Variant with id ${input.draggedId} or ${input.droppedId} does not exist`,
);
}
@@ -128,7 +128,7 @@ export const scenariosRouter = createTRPCRouter({
sortIndex: index,
},
});
})
}),
);
}),
@@ -137,7 +137,7 @@ export const scenariosRouter = createTRPCRouter({
z.object({
id: z.string(),
values: z.record(z.string()),
})
}),
)
.mutation(async ({ input }) => {
const existing = await prisma.testScenario.findUnique({

View File

@@ -76,8 +76,7 @@ const t = initTRPC.context<typeof createTRPCContext>().create({
...shape,
data: {
...shape.data,
zodError:
error.cause instanceof ZodError ? error.cause.flatten() : null,
zodError: error.cause instanceof ZodError ? error.cause.flatten() : null,
},
};
},

View File

@@ -8,8 +8,7 @@ const globalForPrisma = globalThis as unknown as {
export const prisma =
globalForPrisma.prisma ??
new PrismaClient({
log:
env.NODE_ENV === "development" ? ["query", "error", "warn"] : ["error"],
log: env.NODE_ENV === "development" ? ["query", "error", "warn"] : ["error"],
});
if (env.NODE_ENV !== "production") globalForPrisma.prisma = prisma;

View File

@@ -5,7 +5,7 @@ import { type VariableMap, fillTemplate } from "./fillTemplate";
export const evaluateOutput = (
modelOutput: ModelOutput,
scenario: TestScenario,
evaluation: Evaluation
evaluation: Evaluation,
): boolean => {
const output = modelOutput.output as unknown as ChatCompletion;
const message = output?.choices?.[0]?.message;

View File

@@ -20,7 +20,7 @@ export const reevaluateVariant = async (variantId: string) => {
await Promise.all(
evaluations.map(async (evaluation) => {
const passCount = modelOutputs.filter((output) =>
evaluateOutput(output, output.testScenario, evaluation)
evaluateOutput(output, output.testScenario, evaluation),
).length;
const failCount = modelOutputs.length - passCount;
@@ -42,7 +42,7 @@ export const reevaluateVariant = async (variantId: string) => {
failCount,
},
});
})
}),
);
};
@@ -64,7 +64,7 @@ export const reevaluateEvaluation = async (evaluation: Evaluation) => {
variants.map(async (variant) => {
const outputs = modelOutputs.filter((output) => output.promptVariantId === variant.id);
const passCount = outputs.filter((output) =>
evaluateOutput(output, output.testScenario, evaluation)
evaluateOutput(output, output.testScenario, evaluation),
).length;
const failCount = outputs.length - passCount;
@@ -86,6 +86,6 @@ export const reevaluateEvaluation = async (evaluation: Evaluation) => {
failCount,
},
});
})
}),
);
};

View File

@@ -8,17 +8,20 @@ export function fillTemplate(template: string, variables: VariableMap): string {
export function fillTemplateJson<T extends JSONSerializable>(
template: T,
variables: VariableMap
variables: VariableMap,
): T {
if (typeof template === "string") {
return fillTemplate(template, variables) as T;
} else if (Array.isArray(template)) {
return template.map((item) => fillTemplateJson(item, variables)) as T;
} else if (typeof template === "object" && template !== null) {
return Object.keys(template).reduce((acc, key) => {
acc[key] = fillTemplateJson(template[key] as JSONSerializable, variables);
return acc;
}, {} as { [key: string]: JSONSerializable } & T);
return Object.keys(template).reduce(
(acc, key) => {
acc[key] = fillTemplateJson(template[key] as JSONSerializable, variables);
return acc;
},
{} as { [key: string]: JSONSerializable } & T,
);
} else {
return template;
}
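For context, `fillTemplateJson` recurses through any JSON-serializable value, running `fillTemplate` over every string leaf while preserving arrays and objects, so an entire prompt config can be templated in one call. A hypothetical usage sketch; the import path and the `{{...}}` placeholder syntax are assumptions, not shown in this diff:

```ts
import { fillTemplateJson } from "~/server/utils/fillTemplate"; // path assumed

// Every string leaf of the config is passed through fillTemplate.
const config = {
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Write a haiku about {{topic}}" }],
};
const filled = fillTemplateJson(config, { topic: "prompt engineering" });
```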

View File

@@ -23,7 +23,7 @@ type CompletionResponse = {
export async function getCompletion(
payload: JSONSerializable,
channel?: string
channel?: string,
): Promise<CompletionResponse> {
const modelName = getModelName(payload);
if (!modelName)
@@ -37,7 +37,7 @@ export async function getCompletion(
return getOpenAIChatCompletion(
payload as unknown as CompletionCreateParams,
env.OPENAI_API_KEY,
channel
channel,
);
}
return {
@@ -51,7 +51,7 @@ export async function getCompletion(
export async function getOpenAIChatCompletion(
payload: CompletionCreateParams,
apiKey: string,
channel?: string
channel?: string,
): Promise<CompletionResponse> {
// If functions are enabled, disable streaming so that we get the full response with token counts
if (payload.functions?.length) payload.stream = false;

View File

@@ -2,7 +2,11 @@ import { omit } from "lodash";
import { env } from "~/env.mjs";
import OpenAI from "openai";
import { type ChatCompletion, type ChatCompletionChunk, type CompletionCreateParams } from "openai/resources/chat";
import {
type ChatCompletion,
type ChatCompletionChunk,
type CompletionCreateParams,
} from "openai/resources/chat";
// console.log("creating openai client");
@@ -10,7 +14,7 @@ export const openai = new OpenAI({ apiKey: env.OPENAI_API_KEY });
export const mergeStreamedChunks = (
base: ChatCompletion | null,
chunk: ChatCompletionChunk
chunk: ChatCompletionChunk,
): ChatCompletion => {
if (base === null) {
return mergeStreamedChunks({ ...chunk, choices: [] }, chunk);
@@ -25,11 +29,14 @@ export const mergeStreamedChunks = (
if (choice.delta?.content)
baseChoice.message.content =
(baseChoice.message.content as string ?? "") + (choice.delta.content ?? "");
((baseChoice.message.content as string) ?? "") + (choice.delta.content ?? "");
if (choice.delta?.function_call) {
const fnCall = baseChoice.message.function_call ?? {};
fnCall.name = (fnCall.name as string ?? "") + (choice.delta.function_call.name as string ?? "");
fnCall.arguments = (fnCall.arguments as string ?? "") + (choice.delta.function_call.arguments as string ?? "");
fnCall.name =
((fnCall.name as string) ?? "") + ((choice.delta.function_call.name as string) ?? "");
fnCall.arguments =
((fnCall.arguments as string) ?? "") +
((choice.delta.function_call.arguments as string) ?? "");
}
} else {
choices.push({ ...omit(choice, "delta"), message: { role: "assistant", ...choice.delta } });
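`mergeStreamedChunks` folds each streamed delta into an accumulating `ChatCompletion`, concatenating the `content` and `function_call` fragments as they arrive. A hypothetical consumer, following the openai v4 SDK's async-iterable streams (import paths assumed):

```ts
import { type ChatCompletion } from "openai/resources/chat";
import { openai, mergeStreamedChunks } from "~/server/utils/openai"; // path assumed

async function collectStreamedCompletion(): Promise<ChatCompletion | null> {
  const stream = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Say hello" }],
    stream: true,
  });
  let merged: ChatCompletion | null = null;
  for await (const chunk of stream) {
    merged = mergeStreamedChunks(merged, chunk); // (ChatCompletion | null, ChatCompletionChunk)
  }
  return merged;
}
```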

View File

@@ -25,7 +25,7 @@ const openAICompletionTokensToDollars: { [key in OpenAIChatModel]: number } = {
export const calculateTokenCost = (
model: SupportedModel | null,
numTokens: number,
isCompletion = false
isCompletion = false,
) => {
if (!model) return 0;
if (model in OpenAIChatModel) {
@@ -37,7 +37,7 @@ export const calculateTokenCost = (
const calculateOpenAIChatTokenCost = (
model: OpenAIChatModel,
numTokens: number,
isCompletion: boolean
isCompletion: boolean,
) => {
const tokensToDollars = isCompletion
? openAICompletionTokensToDollars[model]

View File

@@ -10,7 +10,7 @@ interface GPTTokensMessageItem {
export const countOpenAIChatTokens = (
model: OpenAIChatModel,
messages: ChatCompletion.Choice.Message[]
messages: ChatCompletion.Choice.Message[],
) => {
return new GPTTokens({ model, messages: messages as unknown as GPTTokensMessageItem[] })
.usedTokens;

View File

@@ -6,15 +6,15 @@ dayjs.extend(duration);
dayjs.extend(relativeTime);
export const formatTimePast = (date: Date) => {
const now = dayjs();
const dayDiff = Math.floor(now.diff(date, "day"));
if (dayDiff > 0) return dayjs.duration(-dayDiff, "days").humanize(true);
const hourDiff = Math.floor(now.diff(date, "hour"));
if (hourDiff > 0) return dayjs.duration(-hourDiff, "hours").humanize(true);
const minuteDiff = Math.floor(now.diff(date, "minute"));
if (minuteDiff > 0) return dayjs.duration(-minuteDiff, "minutes").humanize(true);
return 'a few seconds ago'
const now = dayjs();
const dayDiff = Math.floor(now.diff(date, "day"));
if (dayDiff > 0) return dayjs.duration(-dayDiff, "days").humanize(true);
const hourDiff = Math.floor(now.diff(date, "hour"));
if (hourDiff > 0) return dayjs.duration(-hourDiff, "hours").humanize(true);
const minuteDiff = Math.floor(now.diff(date, "minute"));
if (minuteDiff > 0) return dayjs.duration(-minuteDiff, "minutes").humanize(true);
return "a few seconds ago";
};
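Past the quote and semicolon normalization, the logic is untouched: the coarsest nonzero unit wins. Hypothetical outputs, with the exact strings coming from dayjs's `humanize(true)` rather than this code:

```ts
formatTimePast(new Date(Date.now() - 26 * 60 * 60 * 1000)); // "a day ago"
formatTimePast(new Date(Date.now() - 2 * 60 * 60 * 1000)); // "2 hours ago"
formatTimePast(new Date(Date.now() - 30 * 1000)); // "a few seconds ago"
```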

View File

@@ -6,7 +6,7 @@ export const useExperiment = () => {
const router = useRouter();
const experiment = api.experiments.get.useQuery(
{ id: router.query.id as string },
{ enabled: !!router.query.id }
{ enabled: !!router.query.id },
);
return experiment;
@@ -16,7 +16,7 @@ type AsyncFunction<T extends unknown[], U> = (...args: T) => Promise<U>;
export function useHandledAsyncCallback<T extends unknown[], U>(
callback: AsyncFunction<T, U>,
deps: React.DependencyList
deps: React.DependencyList,
) {
const [loading, setLoading] = useState(0);
const [error, setError] = useState<Error | null>(null);

View File

@@ -21,13 +21,6 @@
"~/*": ["./src/*"]
}
},
"include": [
".eslintrc.cjs",
"next-env.d.ts",
"**/*.ts",
"**/*.tsx",
"**/*.cjs",
"**/*.mjs"
],
"include": [".eslintrc.cjs", "next-env.d.ts", "**/*.ts", "**/*.tsx", "**/*.cjs", "**/*.mjs"],
"exclude": ["node_modules"]
}