Add promptTokens and completionTokens to model output (#11)

* Default to streaming in config

* Add tokens to database

* Add NEXT_PUBLIC_SOCKET_URL to .env.example

* Disable streaming for functions

* Add newline to types
This commit is contained in:
arcticfly
2023-07-06 13:12:59 -07:00
committed by GitHub
parent 6ecb952a68
commit 1ae5612d55
11 changed files with 201 additions and 82 deletions

17
src/utils/countTokens.ts Normal file
View File

@@ -0,0 +1,17 @@
import { type ChatCompletion } from "openai/resources/chat";
import { GPTTokens } from "gpt-tokens";
import { type OpenAIChatModels } from "~/server/types";
// Local copy of the message shape the `gpt-tokens` GPTTokens constructor
// consumes (the library does not export this type, so it is redeclared here
// to give the conversion in countOpenAIChatTokens something to target).
interface GPTTokensMessageItem {
// Optional participant name. NOTE(review): presumably mirrors OpenAI's
// per-message `name` field — confirm against the gpt-tokens package types.
name?: string;
// NOTE(review): narrower than OpenAI's role union (no "function") — the
// cast at the call site papers over this; confirm gpt-tokens' actual union.
role: "system" | "user" | "assistant";
// Required here, while OpenAI's message type allows null content.
content: string;
}
/**
 * Counts the tokens consumed by a list of chat messages for the given OpenAI
 * chat model, delegating the actual tokenization to the `gpt-tokens` library.
 *
 * @param model - OpenAI chat model identifier; selects the tokenizer.
 * @param messages - Chat messages to count. OpenAI's type allows `content`
 *   to be null, which `gpt-tokens` does not accept; null is normalized to "".
 * @returns The total token count (`usedTokens`) reported by `gpt-tokens`.
 */
export const countOpenAIChatTokens = (
  model: OpenAIChatModels,
  messages: ChatCompletion.Choice.Message[]
) => {
  // Convert field-by-field instead of the previous `as unknown as
  // GPTTokensMessageItem[]` double assertion, which disabled type checking
  // entirely and let null content flow into gpt-tokens at runtime.
  // NOTE(review): only role/content are mapped; if these messages can carry a
  // `name` field it should be forwarded too (gpt-tokens counts it) — confirm
  // against the OpenAI SDK's Choice.Message type.
  const normalized: GPTTokensMessageItem[] = messages.map((message) => ({
    // Single, narrow assertion on role alone; a future shape mismatch in
    // content (or a missing field) now surfaces as a compile error here.
    role: message.role as GPTTokensMessageItem["role"],
    content: message.content ?? "",
  }));
  return new GPTTokens({ model, messages: normalized }).usedTokens;
};