Fix typescript hints for max_tokens

Kyle Corbitt
2023-07-21 12:04:58 -07:00
parent af9943eefc
commit 213a00a8e6
2 changed files with 8 additions and 1 deletion


@@ -56,6 +56,14 @@ modelProperty.type = "string";
 modelProperty.enum = modelProperty.oneOf[1].enum;
 delete modelProperty["oneOf"];
+// The default of "inf" confuses the TypeScript generator, so we can just remove it
+assert(
+  "max_tokens" in completionRequestSchema.properties &&
+    isObject(completionRequestSchema.properties.max_tokens) &&
+    "default" in completionRequestSchema.properties.max_tokens,
+);
+delete completionRequestSchema.properties.max_tokens["default"];
 // Get the directory of the current script
 const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
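
For context, a minimal standalone sketch of the same patch step follows. The file path and the direct property access are assumptions made for illustration; the actual script works on the schema object it has already loaded and validated.

import assert from "node:assert";
import { readFileSync, writeFileSync } from "node:fs";

// Illustrative only: "openai-schema.json" is an assumed path, not the repo's.
const schema = JSON.parse(readFileSync("openai-schema.json", "utf8"));
const maxTokens = schema.properties?.max_tokens;

// Fail loudly if the upstream schema changes shape, rather than silently
// generating types from an unexpected structure.
assert(maxTokens && typeof maxTokens === "object" && "default" in maxTokens);

// The string default "inf" sits on an integer-typed field, which is what
// trips up schema-to-TypeScript generation, so drop it before generating types.
delete maxTokens.default;

writeFileSync("openai-schema.json", JSON.stringify(schema, null, 2) + "\n");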


@@ -150,7 +150,6 @@
     },
     "max_tokens": {
       "description": "The maximum number of [tokens](/tokenizer) to generate in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.\n",
-      "default": "inf",
       "type": "integer"
     },
     "presence_penalty": {