Improve conversion to/from Claude (#108)

* Increase min width of prompt variant

* Increase width of custom instructions input

* Start recording API docs

* Provide better instructions for converting to/from Claude

* Fix prettier
Author: arcticfly
Date: 2023-08-01 21:03:23 -07:00
Committed by: GitHub
Parent: 026532f2c2
Commit: 72c70e2a55
6 changed files with 22 additions and 83 deletions

View File

@@ -35,7 +35,7 @@ export default function OutputsTable({ experimentId }: { experimentId: string |
   pb={24}
   pl={8}
   display="grid"
-  gridTemplateColumns={`250px repeat(${variants.data.length}, minmax(300px, 1fr)) auto`}
+  gridTemplateColumns={`250px repeat(${variants.data.length}, minmax(360px, 1fr)) auto`}
   sx={{
     "> *": {
       borderColor: "gray.300",
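A quick sketch of what the widened template expands to (the variant count of 3 is illustrative):

const variantCount = 3;
const template = `250px repeat(${variantCount}, minmax(360px, 1fr)) auto`;
// template === "250px repeat(3, minmax(360px, 1fr)) auto": a fixed 250px
// first column, variant columns that are each at least 360px wide (up from
// 300px) and share leftover space equally, then a final auto-sized column.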

View File

@@ -97,7 +97,7 @@ export const RefinePromptModal = ({
   <ModalCloseButton />
   <ModalBody maxW="unset">
     <VStack spacing={8}>
-      <VStack spacing={4}>
+      <VStack spacing={4} w="full">
         {Object.keys(refinementActions).length && (
           <>
             <SimpleGrid columns={{ base: 1, md: 2 }} spacing={8}>

View File

@@ -2,7 +2,7 @@
"type": "object", "type": "object",
"properties": { "properties": {
"model": { "model": {
"description": "The model that will complete your prompt.\nAs we improve Claude, we develop new versions of it that you can query.\nThis parameter controls which version of Claude answers your request.\nRight now we are offering two model families: Claude, and Claude Instant.\nYou can use them by setting model to \"claude-2\" or \"claude-instant-1\", respectively.\nSee models for additional details.\n", "description": "The model that will complete your prompt.",
"x-oaiTypeLabel": "string", "x-oaiTypeLabel": "string",
"type": "string", "type": "string",
"enum": [ "enum": [
@@ -13,116 +13,50 @@
       ]
     },
     "prompt": {
-      "description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\\n\\nHuman: ${userQuestion}\\n\\nAssistant:\nSee our comments on prompts for more context.\n",
+      "description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\"\\n\\nHuman: all instructions for the assistant\\n\\nAssistant:\". The prompt string should begin with the characters \"Human:\" and end with \"Assistant:\".",
       "default": "<|endoftext|>",
-      "nullable": true,
-      "oneOf": [
-        {
-          "type": "string",
-          "default": "",
-          "example": "This is a test."
-        },
-        {
-          "type": "array",
-          "items": {
-            "type": "string",
-            "default": "",
-            "example": "This is a test."
-          }
-        },
-        {
-          "type": "array",
-          "minItems": 1,
-          "items": {
-            "type": "integer"
-          },
-          "example": "[1212, 318, 257, 1332, 13]"
-        },
-        {
-          "type": "array",
-          "minItems": 1,
-          "items": {
-            "type": "array",
-            "minItems": 1,
-            "items": {
-              "type": "integer"
-            }
-          },
-          "example": "[[1212, 318, 257, 1332, 13]]"
-        }
-      ]
+      "example": "\\n\\nHuman: What is the correct translation of ${scenario.input}? I would like a long analysis followed by a short answer.\\n\\nAssistant:",
+      "type": "string"
     },
     "max_tokens_to_sample": {
       "type": "integer",
       "minimum": 1,
       "default": 256,
-      "example": 256,
       "nullable": true,
-      "description": "The maximum number of tokens to generate before stopping.\n\nNote that our models may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate.\n"
+      "description": "The maximum number of tokens to generate before stopping."
     },
     "temperature": {
       "type": "number",
       "minimum": 0,
       "maximum": 1,
-      "default": 1,
-      "example": 1,
       "nullable": true,
-      "description": "Amount of randomness injected into the response.\n\nDefaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks.\n"
+      "description": "Amount of randomness injected into the response.\n\nDefaults to 1."
     },
     "top_p": {
       "type": "number",
       "minimum": 0,
       "maximum": 1,
-      "default": 1,
-      "example": 1,
       "nullable": true,
-      "description": "Use nucleus sampling.\n\nIn nucleus sampling, we compute the cumulative distribution over all the options \nfor each subsequent token in decreasing probability order and cut it off once \nit reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both.\n"
+      "description": "Use nucleus sampling.\n\nYou should either alter temperature or top_p, but not both.\n"
     },
     "top_k": {
       "type": "number",
       "minimum": 0,
       "default": 5,
-      "example": 5,
       "nullable": true,
-      "description": "Only sample from the top K options for each subsequent token.\n\nUsed to remove \"long tail\" low probability responses. Learn more technical details here.\n"
+      "description": "Only sample from the top K options for each subsequent token."
     },
     "stream": {
-      "description": "Whether to incrementally stream the response using server-sent events.\nSee this guide to SSE events for details.type: boolean\n",
+      "description": "Whether to incrementally stream the response using server-sent events.",
+      "type": "boolean",
       "nullable": true,
       "default": false
     },
     "stop_sequences": {
-      "description": "Sequences that will cause the model to stop generating completion text.\nOur models stop on \"\\n\\nHuman:\", and may include additional built-in stop sequences in the future. By providing the stop_sequences parameter, you may include additional strings that will cause the model to stop generating.\n",
+      "description": "Sequences that will cause the model to stop generating completion text.\nBy default, our models stop on \"\\n\\nHuman:\".",
       "default": null,
       "nullable": true,
-      "oneOf": [
-        {
-          "type": "string",
-          "default": "<|endoftext|>",
-          "example": "\n",
-          "nullable": true
-        },
-        {
-          "type": "array",
-          "minItems": 1,
-          "maxItems": 4,
-          "items": {
-            "type": "string",
-            "example": "[\"\\n\"]"
-          }
-        }
-      ]
-    },
-    "metadata": {
-      "type": "object",
-      "properties": {
-        "user_id": {
-          "type": "string",
-          "example": "13803d75-b4b5-4c3e-b2a2-6f21399b021b",
-          "description": "An external identifier for the user who is associated with the request.\n\nThis should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. \nDo not include any identifying information such as name, email address, or phone number.\n"
-        }
-      },
-      "description": "An object describing metadata about the request.\n"
+      "type": "array"
     }
   },
   "required": ["model", "prompt", "max_tokens_to_sample"]

View File

@@ -15,6 +15,7 @@ const frontendModelProvider: FrontendModelProvider<SupportedModel, Completion> =
   speed: "medium",
   provider: "anthropic",
   learnMoreUrl: "https://www.anthropic.com/product",
+  apiDocsUrl: "https://docs.anthropic.com/claude/reference/complete_post",
 },
 "claude-instant-1.1": {
   name: "Claude Instant 1.1",
@@ -24,6 +25,7 @@ const frontendModelProvider: FrontendModelProvider<SupportedModel, Completion> =
   speed: "fast",
   provider: "anthropic",
   learnMoreUrl: "https://www.anthropic.com/product",
+  apiDocsUrl: "https://docs.anthropic.com/claude/reference/complete_post",
 },
 },

View File

@@ -21,6 +21,7 @@ export type Model = {
   provider: SupportedProvider;
   description?: string;
   learnMoreUrl?: string;
+  apiDocsUrl?: string;
 };
 export type ProviderModel = { provider: z.infer<typeof ZodSupportedProvider>; model: string };
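Since apiDocsUrl is optional on Model, consumers should guard on its presence. A hypothetical usage sketch with the Chakra UI components this repo already uses (the component and import path are not part of this commit):

import { Link } from "@chakra-ui/react";
import type { Model } from "./types"; // hypothetical import path

// Renders a link to the provider's API reference only when one is defined.
const ApiDocsLink = ({ model }: { model: Model }) =>
  model.apiDocsUrl ? (
    <Link href={model.apiDocsUrl} isExternal>
      API docs
    </Link>
  ) : null;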

View File

@@ -51,7 +51,7 @@ const requestUpdatedPromptFunction = async (
   originalModelProvider.inputSchema,
   null,
   2,
- )}\n\nDo not add any assistant messages. Do not add any extra fields to the schema. Try to keep temperature consistent.`,
+ )}\n\nDo not add any assistant messages.`,
 },
 {
   role: "user",
@@ -66,9 +66,11 @@ const requestUpdatedPromptFunction = async (
 if (newModel.provider !== originalModel.provider) {
   messages.push({
     role: "user",
-    content: `The old provider was ${originalModel.provider}. The new provider is ${
+    content: `As seen in the first argument to definePrompt, the old provider endpoint was "${
+      originalModel.provider
+    }". The new provider endpoint is "${
       newModel.provider
-    }. Here is the schema for the new model:\n---\n${JSON.stringify(
+    }". Here is the schema for the new model:\n---\n${JSON.stringify(
       modelProviders[newModel.provider].inputSchema,
       null,
       2,
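With the new wording, the cross-provider message names both endpoints explicitly and quotes them. A sketch of the interpolated result, assuming illustrative provider identifiers and eliding the appended schema:

// Assuming originalModel.provider === "openai/ChatCompletion" and
// newModel.provider === "anthropic/completion" (illustrative values):
const exampleMessage =
  'As seen in the first argument to definePrompt, the old provider endpoint was ' +
  '"openai/ChatCompletion". The new provider endpoint is "anthropic/completion". ' +
  'Here is the schema for the new model:\n---\n{ ...new provider inputSchema... }';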