From 72c70e2a55a12365b0d412ac2922cce687131cd7 Mon Sep 17 00:00:00 2001
From: arcticfly <41524992+arcticfly@users.noreply.github.com>
Date: Tue, 1 Aug 2023 21:03:23 -0700
Subject: [PATCH] Improve conversion to/from Claude (#108)
* Increase min width of prompt variant
* Increase width of custom instructions input
* Start recording API docs
* Provide better instructions for converting to/from Claude
* Fix prettier
---
src/components/OutputsTable/index.tsx | 2 +-
.../RefinePromptModal/RefinePromptModal.tsx | 2 +-
.../anthropic/codegen/input.schema.json | 90 +++----------------
src/modelProviders/anthropic/frontend.ts | 2 +
src/modelProviders/types.ts | 1 +
src/server/utils/deriveNewContructFn.ts | 8 +-
6 files changed, 22 insertions(+), 83 deletions(-)
diff --git a/src/components/OutputsTable/index.tsx b/src/components/OutputsTable/index.tsx
index 636dff4..f014bce 100644
--- a/src/components/OutputsTable/index.tsx
+++ b/src/components/OutputsTable/index.tsx
@@ -35,7 +35,7 @@ export default function OutputsTable({ experimentId }: { experimentId: string |
pb={24}
pl={8}
display="grid"
- gridTemplateColumns={`250px repeat(${variants.data.length}, minmax(300px, 1fr)) auto`}
+ gridTemplateColumns={`250px repeat(${variants.data.length}, minmax(360px, 1fr)) auto`}
sx={{
"> *": {
borderColor: "gray.300",
diff --git a/src/components/RefinePromptModal/RefinePromptModal.tsx b/src/components/RefinePromptModal/RefinePromptModal.tsx
index 49bdf0e..3d9db2f 100644
--- a/src/components/RefinePromptModal/RefinePromptModal.tsx
+++ b/src/components/RefinePromptModal/RefinePromptModal.tsx
@@ -97,7 +97,7 @@ export const RefinePromptModal = ({
-
+
{Object.keys(refinementActions).length && (
<>
diff --git a/src/modelProviders/anthropic/codegen/input.schema.json b/src/modelProviders/anthropic/codegen/input.schema.json
index cc36c56..0d5bbec 100644
--- a/src/modelProviders/anthropic/codegen/input.schema.json
+++ b/src/modelProviders/anthropic/codegen/input.schema.json
@@ -2,7 +2,7 @@
"type": "object",
"properties": {
"model": {
- "description": "The model that will complete your prompt.\nAs we improve Claude, we develop new versions of it that you can query.\nThis parameter controls which version of Claude answers your request.\nRight now we are offering two model families: Claude, and Claude Instant.\nYou can use them by setting model to \"claude-2\" or \"claude-instant-1\", respectively.\nSee models for additional details.\n",
+ "description": "The model that will complete your prompt.",
"x-oaiTypeLabel": "string",
"type": "string",
"enum": [
@@ -13,116 +13,50 @@
]
},
"prompt": {
- "description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\\n\\nHuman: ${userQuestion}\\n\\nAssistant:\nSee our comments on prompts for more context.\n",
+ "description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\"\\n\\nHuman: all instructions for the assistant\\n\\nAssistant:\". The prompt string should begin with the characters \"Human:\" and end with \"Assistant:\".",
"default": "<|endoftext|>",
- "nullable": true,
- "oneOf": [
- {
- "type": "string",
- "default": "",
- "example": "This is a test."
- },
- {
- "type": "array",
- "items": {
- "type": "string",
- "default": "",
- "example": "This is a test."
- }
- },
- {
- "type": "array",
- "minItems": 1,
- "items": {
- "type": "integer"
- },
- "example": "[1212, 318, 257, 1332, 13]"
- },
- {
- "type": "array",
- "minItems": 1,
- "items": {
- "type": "array",
- "minItems": 1,
- "items": {
- "type": "integer"
- }
- },
- "example": "[[1212, 318, 257, 1332, 13]]"
- }
- ]
+ "example": "\\n\\nHuman: What is the correct translation of ${scenario.input}? I would like a long analysis followed by a short answer.\\n\\nAssistant:",
+ "type": "string"
},
"max_tokens_to_sample": {
"type": "integer",
"minimum": 1,
"default": 256,
- "example": 256,
"nullable": true,
- "description": "The maximum number of tokens to generate before stopping.\n\nNote that our models may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate.\n"
+ "description": "The maximum number of tokens to generate before stopping."
},
"temperature": {
"type": "number",
"minimum": 0,
"maximum": 1,
- "default": 1,
- "example": 1,
"nullable": true,
- "description": "Amount of randomness injected into the response.\n\nDefaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks.\n"
+ "description": "Amount of randomness injected into the response.\n\nDefaults to 1."
},
"top_p": {
"type": "number",
"minimum": 0,
"maximum": 1,
- "default": 1,
- "example": 1,
"nullable": true,
- "description": "Use nucleus sampling.\n\nIn nucleus sampling, we compute the cumulative distribution over all the options \nfor each subsequent token in decreasing probability order and cut it off once \nit reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both.\n"
+ "description": "Use nucleus sampling.\n\nYou should either alter temperature or top_p, but not both.\n"
},
"top_k": {
"type": "number",
"minimum": 0,
"default": 5,
- "example": 5,
"nullable": true,
- "description": "Only sample from the top K options for each subsequent token.\n\nUsed to remove \"long tail\" low probability responses. Learn more technical details here.\n"
+ "description": "Only sample from the top K options for each subsequent token."
},
"stream": {
- "description": "Whether to incrementally stream the response using server-sent events.\nSee this guide to SSE events for details.type: boolean\n",
+ "description": "Whether to incrementally stream the response using server-sent events.",
+ "type": "boolean",
"nullable": true,
"default": false
},
"stop_sequences": {
- "description": "Sequences that will cause the model to stop generating completion text.\nOur models stop on \"\\n\\nHuman:\", and may include additional built-in stop sequences in the future. By providing the stop_sequences parameter, you may include additional strings that will cause the model to stop generating.\n",
+ "description": "Sequences that will cause the model to stop generating completion text.\nBy default, our models stop on \"\\n\\nHuman:\".",
"default": null,
"nullable": true,
- "oneOf": [
- {
- "type": "string",
- "default": "<|endoftext|>",
- "example": "\n",
- "nullable": true
- },
- {
- "type": "array",
- "minItems": 1,
- "maxItems": 4,
- "items": {
- "type": "string",
- "example": "[\"\\n\"]"
- }
- }
- ]
- },
- "metadata": {
- "type": "object",
- "properties": {
- "user_id": {
- "type": "string",
- "example": "13803d75-b4b5-4c3e-b2a2-6f21399b021b",
- "description": "An external identifier for the user who is associated with the request.\n\nThis should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. \nDo not include any identifying information such as name, email address, or phone number.\n"
- }
- },
- "description": "An object describing metadata about the request.\n"
+ "type": "array"
}
},
"required": ["model", "prompt", "max_tokens_to_sample"]
diff --git a/src/modelProviders/anthropic/frontend.ts b/src/modelProviders/anthropic/frontend.ts
index 6874648..a28aa83 100644
--- a/src/modelProviders/anthropic/frontend.ts
+++ b/src/modelProviders/anthropic/frontend.ts
@@ -15,6 +15,7 @@ const frontendModelProvider: FrontendModelProvider =
speed: "medium",
provider: "anthropic",
learnMoreUrl: "https://www.anthropic.com/product",
+ apiDocsUrl: "https://docs.anthropic.com/claude/reference/complete_post",
},
"claude-instant-1.1": {
name: "Claude Instant 1.1",
@@ -24,6 +25,7 @@ const frontendModelProvider: FrontendModelProvider =
speed: "fast",
provider: "anthropic",
learnMoreUrl: "https://www.anthropic.com/product",
+ apiDocsUrl: "https://docs.anthropic.com/claude/reference/complete_post",
},
},
diff --git a/src/modelProviders/types.ts b/src/modelProviders/types.ts
index c39b965..842d278 100644
--- a/src/modelProviders/types.ts
+++ b/src/modelProviders/types.ts
@@ -21,6 +21,7 @@ export type Model = {
provider: SupportedProvider;
description?: string;
learnMoreUrl?: string;
+ apiDocsUrl?: string;
};
export type ProviderModel = { provider: z.infer; model: string };
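Note: with the optional `apiDocsUrl` added to `Model`, the frontend can link directly to a provider's API reference and fall back to the generic product page otherwise. A hypothetical usage sketch (not part of this PR; the import alias is an assumption):

```ts
import type { Model } from "~/modelProviders/types";

// Prefer the API reference when the model defines one.
const docsLink = (model: Model): string | undefined =>
  model.apiDocsUrl ?? model.learnMoreUrl;
```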
diff --git a/src/server/utils/deriveNewContructFn.ts b/src/server/utils/deriveNewContructFn.ts
index 32b7696..e5dc2af 100644
--- a/src/server/utils/deriveNewContructFn.ts
+++ b/src/server/utils/deriveNewContructFn.ts
@@ -51,7 +51,7 @@ const requestUpdatedPromptFunction = async (
originalModelProvider.inputSchema,
null,
2,
- )}\n\nDo not add any assistant messages. Do not add any extra fields to the schema. Try to keep temperature consistent.`,
+ )}\n\nDo not add any assistant messages.`,
},
{
role: "user",
@@ -66,9 +66,11 @@ const requestUpdatedPromptFunction = async (
if (newModel.provider !== originalModel.provider) {
messages.push({
role: "user",
- content: `The old provider was ${originalModel.provider}. The new provider is ${
+ content: `As seen in the first argument to definePrompt, the old provider endpoint was "${
+ originalModel.provider
+ }". The new provider endpoint is "${
newModel.provider
- }. Here is the schema for the new model:\n---\n${JSON.stringify(
+ }". Here is the schema for the new model:\n---\n${JSON.stringify(
modelProviders[newModel.provider].inputSchema,
null,
2,
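Note: the reworded messages lean on the fact that the provider identifier appears as the first argument to definePrompt. As a sketch of the shape of constructor the updated instructions steer the model toward when converting a prompt to Claude (here `definePrompt` and `scenario` are assumed to be provided by the prompt-constructor environment, and the provider string and field values are illustrative, not taken from this PR):

```ts
definePrompt("anthropic", {
  model: "claude-2",
  // Chat-style messages are flattened into Claude's required
  // "\n\nHuman: ...\n\nAssistant:" string, with no assistant messages added.
  prompt: `\n\nHuman: What is the correct translation of ${scenario.input}? I would like a long analysis followed by a short answer.\n\nAssistant:`,
  max_tokens_to_sample: 256,
});
```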