diff --git a/src/modelProviders/openai-ChatCompletion/codegen/codegen.ts b/src/modelProviders/openai-ChatCompletion/codegen/codegen.ts
index 927a716..443f7a8 100644
--- a/src/modelProviders/openai-ChatCompletion/codegen/codegen.ts
+++ b/src/modelProviders/openai-ChatCompletion/codegen/codegen.ts
@@ -56,6 +56,14 @@
 modelProperty.type = "string";
 modelProperty.enum = modelProperty.oneOf[1].enum;
 delete modelProperty["oneOf"];
 
+// The default of "inf" confuses the TypeScript generator, so we just remove it
+assert(
+  "max_tokens" in completionRequestSchema.properties &&
+    isObject(completionRequestSchema.properties.max_tokens) &&
+    "default" in completionRequestSchema.properties.max_tokens,
+);
+delete completionRequestSchema.properties.max_tokens["default"];
+
 // Get the directory of the current script
 const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
diff --git a/src/modelProviders/openai-ChatCompletion/codegen/input.schema.json b/src/modelProviders/openai-ChatCompletion/codegen/input.schema.json
index 60d039e..9f62df0 100644
--- a/src/modelProviders/openai-ChatCompletion/codegen/input.schema.json
+++ b/src/modelProviders/openai-ChatCompletion/codegen/input.schema.json
@@ -150,7 +150,6 @@
     },
     "max_tokens": {
      "description": "The maximum number of [tokens](/tokenizer) to generate in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.\n",
-      "default": "inf",
       "type": "integer"
     },
     "presence_penalty": {
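
For context on why this patch works: the schema declares `max_tokens` as `"type": "integer"` but gives it the string default `"inf"`, and that type mismatch is what confuses the TypeScript generator. The assert-before-delete in codegen.ts also makes the script fail loudly if the upstream schema ever drops or renames the property. Below is a minimal standalone sketch of the same pattern; the `isObject` guard and the schema path are assumptions for illustration, not the repo's actual code.

```ts
// Sketch only: assumes isObject is a standard narrowing type guard; the
// repo's actual helper may be defined differently.
import assert from "node:assert";
import { readFileSync } from "node:fs";

function isObject(value: unknown): value is Record<string, unknown> {
  return typeof value === "object" && value !== null && !Array.isArray(value);
}

// Hypothetical schema path; the real script resolves paths relative to
// import.meta.url.
const schema = JSON.parse(readFileSync("input.schema.json", "utf8")) as {
  properties: Record<string, unknown>;
};

// Fail loudly if upstream ever drops max_tokens or its default, then strip
// the string default ("inf") that the generator cannot map to an integer.
assert(isObject(schema.properties.max_tokens));
assert("default" in schema.properties.max_tokens);
delete schema.properties.max_tokens["default"];
```

Deleting the default (rather than coercing it to a number) sidesteps the question of what `"inf"` should even mean for an integer field, and leaves the generated type purely driven by `"type": "integer"`.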