Better division of labor between frontend and backend model providers

A bit more careful thinking about which types go where.
Kyle Corbitt
2023-07-21 11:49:35 -07:00
parent 7e1fbb3767
commit 741128e0f4
7 changed files with 58 additions and 48 deletions


@@ -3,6 +3,7 @@ import { type ModelProvider } from "../types";
 import inputSchema from "./codegen/input.schema.json";
 import { type ChatCompletion, type CompletionCreateParams } from "openai/resources/chat";
 import { getCompletion } from "./getCompletion";
+import frontendModelProvider from "./frontend";
 
 const supportedModels = [
   "gpt-4-0613",
@@ -11,7 +12,7 @@ const supportedModels = [
   "gpt-3.5-turbo-16k-0613",
 ] as const;
 
-type SupportedModel = (typeof supportedModels)[number];
+export type SupportedModel = (typeof supportedModels)[number];
 
 export type OpenaiChatModelProvider = ModelProvider<
   SupportedModel,
@@ -20,25 +21,6 @@ export type OpenaiChatModelProvider = ModelProvider<
 >;
 
 const modelProvider: OpenaiChatModelProvider = {
   name: "OpenAI ChatCompletion",
-  models: {
-    "gpt-4-0613": {
-      name: "GPT-4",
-      learnMore: "https://openai.com/gpt-4",
-    },
-    "gpt-4-32k-0613": {
-      name: "GPT-4 32k",
-      learnMore: "https://openai.com/gpt-4",
-    },
-    "gpt-3.5-turbo-0613": {
-      name: "GPT-3.5 Turbo",
-      learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
-    },
-    "gpt-3.5-turbo-16k-0613": {
-      name: "GPT-3.5 Turbo 16k",
-      learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
-    },
-  },
   getModel: (input) => {
     if (supportedModels.includes(input.model as SupportedModel))
       return input.model as SupportedModel;
@@ -57,6 +39,7 @@ const modelProvider: OpenaiChatModelProvider = {
   inputSchema: inputSchema as JSONSchema4,
   shouldStream: (input) => input.stream ?? false,
   getCompletion,
+  ...frontendModelProvider,
 };
 
 export default modelProvider;
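
Judging from the added import and the spread at the bottom of the provider, the display-oriented models map removed above presumably now lives in the new "./frontend" module, which the backend provider merges back in via ...frontendModelProvider. Below is a minimal sketch of what that module might contain, assuming it simply carries the moved metadata; the file's actual contents and the exact frontend provider type are not shown in this diff, so treat the shape as hypothetical.

// frontend.ts -- hypothetical reconstruction, not part of this diff
import { type SupportedModel } from ".";

const frontendModelProvider = {
  // Display metadata only: safe to import from browser code, since it
  // carries no server-side dependencies such as getCompletion.
  models: {
    "gpt-4-0613": {
      name: "GPT-4",
      learnMore: "https://openai.com/gpt-4",
    },
    "gpt-4-32k-0613": {
      name: "GPT-4 32k",
      learnMore: "https://openai.com/gpt-4",
    },
    "gpt-3.5-turbo-0613": {
      name: "GPT-3.5 Turbo",
      learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
    },
    "gpt-3.5-turbo-16k-0613": {
      name: "GPT-3.5 Turbo 16k",
      learnMore: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
    },
    // The exported SupportedModel type lets the compiler check that every
    // supported model has an entry here (assumed use of `satisfies`).
  } satisfies Record<SupportedModel, { name: string; learnMore: string }>,
};

export default frontendModelProvider;

The point of the split, presumably, is that UI code can import the frontend module for model names and docs links without pulling in server-only code, while the spread keeps the combined backend object satisfying the full ModelProvider interface.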