Add promptTokens and completionTokens to model output (#11)

* Default to streaming in config

* Add tokens to database

* Add NEXT_PUBLIC_SOCKET_URL to .env.example

* Disable streaming for functions

* Add newline to types

Author: arcticfly
Date: 2023-07-06 13:12:59 -07:00 (committed by GitHub)
Parent: 6ecb952a68
Commit: 1ae5612d55

11 changed files with 201 additions and 82 deletions
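
The title and the "Add tokens to database" bullet suggest the commit records OpenAI usage counts on each stored model output. The sketch below is a hypothetical illustration of that idea rather than code from this commit: the Prisma model name `modelOutput` and its `promptTokens`/`completionTokens` columns are assumptions, while `usage.prompt_tokens` and `usage.completion_tokens` are the fields the OpenAI chat completions API returns for non-streaming requests.

// Hypothetical sketch: capture token usage from a non-streaming chat completion
// and persist it. Prisma model/column names are assumptions, not from this diff.
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

async function recordTokenUsage(
  outputId: string,
  messages: { role: string; content: string }[],
) {
  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    },
    // usage is only included when the request is not streamed
    body: JSON.stringify({ model: "gpt-3.5-turbo", messages }),
  });
  const completion = await response.json();

  await prisma.modelOutput.update({
    where: { id: outputId },
    data: {
      promptTokens: completion.usage?.prompt_tokens,
      completionTokens: completion.usage?.completion_tokens,
    },
  });
}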


@@ -44,6 +44,7 @@ export const experimentsRouter = createTRPCRouter({
       sortIndex: 0,
       config: {
         model: "gpt-3.5-turbo",
+        stream: true,
         messages: [
           {
             role: "system",