{
  "type": "object",
  "properties": {
    "model": {
      "description": "The model that will complete your prompt.",
      "x-oaiTypeLabel": "string",
      "type": "string",
      "enum": [
        "claude-2",
        "claude-2.0",
        "claude-instant-1",
        "claude-instant-1.1",
        "claude-instant-1.2"
      ]
    },
    "prompt": {
"description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\"\\n\\nHuman: all instructions for the assistant\\n\\nAssistant:\". The prompt string should begin with the characters \"Human:\" and end with \"Assistant:\".",
|
|
"default": "<|endoftext|>",
|
|
"example": "\\n\\nHuman: What is the correct translation of ${scenario.input}? I would like a long analysis followed by a short answer.\\n\\nAssistant:",
|
|
"type": "string"
|
|
},
|
|
"max_tokens_to_sample": {
|
|
"type": "integer",
|
|
"minimum": 1,
|
|
"default": 256,
|
|
"nullable": true,
|
|
"description": "The maximum number of tokens to generate before stopping."
|
|
},
|
|
"temperature": {
|
|
"type": "number",
|
|
"minimum": 0,
|
|
"maximum": 1,
|
|
"nullable": true,
|
|
"description": "Amount of randomness injected into the response.\n\nDefaults to 1."
|
|
},
|
|
"top_p": {
|
|
"type": "number",
|
|
"minimum": 0,
|
|
"maximum": 1,
|
|
"nullable": true,
|
|
"description": "Use nucleus sampling.\n\nYou should either alter temperature or top_p, but not both.\n"
|
|
    },
    "top_k": {
      "type": "number",
      "minimum": 0,
      "default": 5,
      "nullable": true,
      "description": "Only sample from the top K options for each subsequent token."
    },
    "stream": {
      "description": "Whether to incrementally stream the response using server-sent events.",
      "type": "boolean",
      "nullable": true,
      "default": false
    },
    "stop_sequences": {
      "description": "Sequences that will cause the model to stop generating completion text.\nBy default, our models stop on \"\\n\\nHuman:\".",
      "default": null,
      "nullable": true,
"type": "array"
|
|
    }
  },
  "required": ["model", "prompt", "max_tokens_to_sample"]
}
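
For reference, a minimal request body that conforms to the schema above might look like the following. The model name and parameter values are illustrative picks from the enum and defaults in the schema, not an official sample; the prompt's literal newlines appear as \n escapes in the JSON string.

{
  "model": "claude-2",
  "prompt": "\n\nHuman: What is the capital of France?\n\nAssistant:",
  "max_tokens_to_sample": 256,
  "stop_sequences": ["\n\nHuman:"]
}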