OpenPipe-llm/src/modelProviders/anthropic/codegen/input.schema.json

{
  "type": "object",
  "properties": {
    "model": {
      "description": "The model that will complete your prompt.",
      "x-oaiTypeLabel": "string",
      "type": "string",
      "enum": [
        "claude-2",
        "claude-2.0",
        "claude-instant-1",
        "claude-instant-1.1"
      ]
    },
    "prompt": {
      "description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\"\\n\\nHuman: all instructions for the assistant\\n\\nAssistant:\". The prompt string should begin with the characters \"Human:\" and end with \"Assistant:\".",
      "default": "<|endoftext|>",
      "example": "\\n\\nHuman: What is the correct translation of ${scenario.input}? I would like a long analysis followed by a short answer.\\n\\nAssistant:",
      "type": "string"
    },
    "max_tokens_to_sample": {
      "type": "integer",
      "minimum": 1,
      "default": 256,
      "nullable": true,
      "description": "The maximum number of tokens to generate before stopping."
    },
    "temperature": {
      "type": "number",
      "minimum": 0,
      "maximum": 1,
      "nullable": true,
      "description": "Amount of randomness injected into the response.\n\nDefaults to 1."
    },
    "top_p": {
      "type": "number",
      "minimum": 0,
      "maximum": 1,
      "nullable": true,
      "description": "Use nucleus sampling.\n\nYou should either alter temperature or top_p, but not both.\n"
    },
    "top_k": {
      "type": "number",
      "minimum": 0,
      "default": 5,
      "nullable": true,
      "description": "Only sample from the top K options for each subsequent token."
    },
    "stream": {
      "description": "Whether to incrementally stream the response using server-sent events.",
      "type": "boolean",
      "nullable": true,
      "default": false
    },
    "stop_sequences": {
      "description": "Sequences that will cause the model to stop generating completion text.\nBy default, our models stop on \"\\n\\nHuman:\".",
      "default": null,
      "nullable": true,
      "type": "array"
    }
  },
  "required": ["model", "prompt", "max_tokens_to_sample"]
}
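
For reference, a request body conforming to this schema is what Anthropic's legacy Text Completions endpoint expects. The TypeScript sketch below is illustrative only: the endpoint URL and the x-api-key / anthropic-version headers follow Anthropic's published API, while the ANTHROPIC_API_KEY placeholder and the complete() helper are hypothetical and not part of this repo. Note that the schema leaves the item type of stop_sequences unspecified; strings are assumed here.

// Minimal sketch of a request conforming to input.schema.json.
// Assumes Node 18+ (global fetch); ANTHROPIC_API_KEY is an illustrative
// placeholder, not something this repo defines.

interface AnthropicCompletionInput {
  model: "claude-2" | "claude-2.0" | "claude-instant-1" | "claude-instant-1.1";
  prompt: string;
  max_tokens_to_sample: number;
  temperature?: number | null;
  top_p?: number | null;
  top_k?: number | null;
  stream?: boolean | null;
  stop_sequences?: string[] | null; // item type assumed; schema omits "items"
}

async function complete(input: AnthropicCompletionInput): Promise<unknown> {
  const res = await fetch("https://api.anthropic.com/v1/complete", {
    method: "POST",
    headers: {
      "content-type": "application/json",
      "x-api-key": process.env.ANTHROPIC_API_KEY ?? "",
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify(input),
  });
  return res.json();
}

// Usage: the prompt follows the required "\n\nHuman: ...\n\nAssistant:" format
// described in the schema, and all three required fields are present.
complete({
  model: "claude-2",
  prompt: "\n\nHuman: What is the capital of France?\n\nAssistant:",
  max_tokens_to_sample: 256,
  temperature: 0.7,
}).then(console.log);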
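
Because the file uses OpenAPI-style keywords (nullable, example, x-oaiTypeLabel) that are not part of standard JSON Schema, a generic validator such as Ajv needs strict mode relaxed to accept it. The sketch below is one way to check a payload against the schema; it assumes resolveJsonModule/esModuleInterop in tsconfig and is not necessarily how OpenPipe's codegen actually consumes this file.

// Sketch: validating a candidate payload against input.schema.json with Ajv.
// strict: false stops Ajv from rejecting the OpenAPI-only keywords; Ajv then
// ignores them, so "nullable": true fields still fail on explicit nulls.
import Ajv from "ajv";
import schema from "./input.schema.json";

const ajv = new Ajv({ strict: false });
const validate = ajv.compile(schema);

const payload = {
  model: "claude-instant-1",
  prompt: "\n\nHuman: Summarize this document.\n\nAssistant:",
  max_tokens_to_sample: 256,
};

if (!validate(payload)) {
  console.error(validate.errors); // e.g. a missing required field
}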