Files
OpenPipe-llm/src/utils/countTokens.ts
arcticfly 1ae5612d55 Add promptTokens and completionTokens to model output (#11)
* Default to streaming in config

* Add tokens to database

* Add NEXT_PUBLIC_SOCKET_URL to .env.example

* Disable streaming for functions

* Add newline to types
2023-07-06 13:12:59 -07:00

18 lines
499 B
TypeScript

import { type ChatCompletion } from "openai/resources/chat";
import { GPTTokens } from "gpt-tokens";
import { type OpenAIChatModels } from "~/server/types";
// Message shape accepted by the `gpt-tokens` library's GPTTokens constructor.
// Declared locally so we can assert OpenAI SDK messages into it below;
// NOTE(review): assumes gpt-tokens only reads role/content/name — confirm
// against the gpt-tokens package's own typings if it is upgraded.
interface GPTTokensMessageItem {
  // Optional participant name, as in the OpenAI chat API.
  name?: string;
  // gpt-tokens only models these three roles (no "function" role).
  role: "system" | "user" | "assistant";
  content: string;
}
/**
 * Counts the tokens consumed by a list of chat messages for the given
 * OpenAI chat model, delegating tokenization to the `gpt-tokens` library.
 *
 * @param model - OpenAI chat model identifier (selects the tokenizer).
 * @param messages - Chat messages whose combined token usage is counted.
 * @returns The total token count reported by gpt-tokens (`usedTokens`).
 */
export const countOpenAIChatTokens = (
  model: OpenAIChatModels,
  messages: ChatCompletion.Choice.Message[]
): number => {
  // The OpenAI SDK's message type and gpt-tokens' expected type are
  // structurally close but nominally incompatible (e.g. the SDK allows
  // roles gpt-tokens does not model), so a double assertion is required.
  // NOTE(review): this is unchecked — a message with an unexpected role or
  // null content would slip through; consider validating/mapping instead.
  const gptTokensMessages = messages as unknown as GPTTokensMessageItem[];
  return new GPTTokens({ model, messages: gptTokensMessages }).usedTokens;
};