Add promptTokens and completionTokens to model output (#11)
* Default to streaming in config
* Add tokens to database
* Add NEXT_PUBLIC_SOCKET_URL to .env.example
* Disable streaming for functions
* Add newline to types
This commit is contained in:
17 lines added
src/utils/countTokens.ts (new file)
@@ -0,0 +1,17 @@
|
||||
import { type ChatCompletion } from "openai/resources/chat";
|
||||
import { GPTTokens } from "gpt-tokens";
|
||||
import { type OpenAIChatModels } from "~/server/types";
|
||||
|
||||
interface GPTTokensMessageItem {
|
||||
name?: string;
|
||||
role: "system" | "user" | "assistant";
|
||||
content: string;
|
||||
}
|
||||
|
||||
export const countOpenAIChatTokens = (
|
||||
model: OpenAIChatModels,
|
||||
messages: ChatCompletion.Choice.Message[]
|
||||
) => {
|
||||
return new GPTTokens({ model, messages: messages as unknown as GPTTokensMessageItem[] })
|
||||
.usedTokens;
|
||||
};
|
||||
Reference in New Issue
Block a user