From e649f42c9cdbe94530aa9a5fdca212ab61b62435 Mon Sep 17 00:00:00 2001
From: arcticfly <41524992+arcticfly@users.noreply.github.com>
Date: Mon, 14 Aug 2023 13:03:48 -0700
Subject: [PATCH] Await completions (#153)

* Continue polling stats while waiting for completions to finish

* Clarify convert to function call instructions
---
 .../components/OutputsTable/VariantStats.tsx  |  8 +++-
 .../refinementActions.ts                      | 37 ++++++++++---------
 .../api/routers/promptVariants.router.ts      |  3 ++
 app/src/server/utils/deriveNewContructFn.ts   |  2 +-
 4 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/app/src/components/OutputsTable/VariantStats.tsx b/app/src/components/OutputsTable/VariantStats.tsx
index 8c81fdb..fd7a219 100644
--- a/app/src/components/OutputsTable/VariantStats.tsx
+++ b/app/src/components/OutputsTable/VariantStats.tsx
@@ -21,14 +21,18 @@ export default function VariantStats(props: { variant: PromptVariant }) {
         outputTokens: 0,
         scenarioCount: 0,
         outputCount: 0,
+        awaitingCompletions: false,
         awaitingEvals: false,
       },
       refetchInterval,
     },
   );
 
-  // Poll every two seconds while we are waiting for LLM retrievals to finish
-  useEffect(() => setRefetchInterval(data.awaitingEvals ? 5000 : 0), [data.awaitingEvals]);
+  // Poll every five seconds while we are waiting for LLM retrievals to finish
+  useEffect(
+    () => setRefetchInterval(data.awaitingCompletions || data.awaitingEvals ? 5000 : 0),
+    [data.awaitingCompletions, data.awaitingEvals],
+  );
 
   const [passColor, neutralColor, failColor] = useToken("colors", [
     "green.500",
diff --git a/app/src/modelProviders/openai-ChatCompletion/refinementActions.ts b/app/src/modelProviders/openai-ChatCompletion/refinementActions.ts
index ee3128d..9081730 100644
--- a/app/src/modelProviders/openai-ChatCompletion/refinementActions.ts
+++ b/app/src/modelProviders/openai-ChatCompletion/refinementActions.ts
@@ -120,9 +120,9 @@ export const refinementActions: Record = {
   "Convert to function call": {
     icon: TfiThought,
     description: "Use function calls to get output from the model in a more structured way.",
-    instructions: `OpenAI functions are a specialized way for an LLM to return output.
+    instructions: `OpenAI functions are a specialized way for an LLM to return its final output.
 
-    This is what a prompt looks like before adding a function:
+    Example 1 before:
 
     definePrompt("openai/ChatCompletion", {
       model: "gpt-4",
@@ -139,7 +139,7 @@ export const refinementActions: Record = {
       ],
     });
 
-    This is what one looks like after adding a function:
+    Example 1 after:
 
     definePrompt("openai/ChatCompletion", {
       model: "gpt-4",
@@ -156,7 +156,7 @@ export const refinementActions: Record = {
       ],
       functions: [
         {
-          name: "extract_sentiment",
+          name: "log_extracted_sentiment",
           parameters: {
             type: "object", // parameters must always be an object with a properties key
             properties: { // properties key is required
@@ -169,13 +169,13 @@ export const refinementActions: Record = {
           },
         ],
         function_call: {
-          name: "extract_sentiment",
+          name: "log_extracted_sentiment",
         },
       });
 
-    Here's another example of adding a function:
-
-    Before:
+    =========
+
+    Example 2 before:
 
     definePrompt("openai/ChatCompletion", {
       model: "gpt-3.5-turbo",
@@ -197,7 +197,7 @@ export const refinementActions: Record = {
       temperature: 0,
     });
 
-    After:
+    Example 2 after:
 
     definePrompt("openai/ChatCompletion", {
       model: "gpt-3.5-turbo",
@@ -215,7 +215,7 @@ export const refinementActions: Record = {
       temperature: 0,
       functions: [
         {
-          name: "score_post",
+          name: "log_post_score",
           parameters: {
             type: "object",
             properties: {
@@ -227,13 +227,13 @@ export const refinementActions: Record = {
           },
         ],
         function_call: {
-          name: "score_post",
+          name: "log_post_score",
         },
       });
 
-    Another example
-
-    Before:
+    =========
+
+    Example 3 before:
 
     definePrompt("openai/ChatCompletion", {
       model: "gpt-3.5-turbo",
@@ -246,7 +246,7 @@ export const refinementActions: Record = {
       ],
     });
 
-    After:
+    Example 3 after:
 
     definePrompt("openai/ChatCompletion", {
      model: "gpt-3.5-turbo",
@@ -258,21 +258,24 @@ export const refinementActions: Record = {
       ],
       functions: [
         {
-          name: "write_in_language",
+          name: "log_translated_text",
           parameters: {
             type: "object",
             properties: {
-              text: {
+              translated_text: {
                 type: "string",
+                description: "The text, written in the language specified in the prompt",
               },
             },
           },
         },
       ],
       function_call: {
-        name: "write_in_language",
+        name: "log_translated_text",
       },
     });
+
+    =========
 
     Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
   },
diff --git a/app/src/server/api/routers/promptVariants.router.ts b/app/src/server/api/routers/promptVariants.router.ts
index b385dde..71c1e0d 100644
--- a/app/src/server/api/routers/promptVariants.router.ts
+++ b/app/src/server/api/routers/promptVariants.router.ts
@@ -131,6 +131,8 @@ export const promptVariantsRouter = createTRPCRouter({
     const inputTokens = overallTokens._sum?.inputTokens ?? 0;
     const outputTokens = overallTokens._sum?.outputTokens ?? 0;
 
+    const awaitingCompletions = outputCount < scenarioCount;
+
     const awaitingEvals = !!evalResults.find(
       (result) => result.totalCount < scenarioCount * evals.length,
     );
@@ -142,6 +144,7 @@ export const promptVariantsRouter = createTRPCRouter({
       overallCost: overallTokens._sum?.cost ?? 0,
       scenarioCount,
       outputCount,
+      awaitingCompletions,
       awaitingEvals,
     };
   }),
diff --git a/app/src/server/utils/deriveNewContructFn.ts b/app/src/server/utils/deriveNewContructFn.ts
index 7439c9a..2d2717d 100644
--- a/app/src/server/utils/deriveNewContructFn.ts
+++ b/app/src/server/utils/deriveNewContructFn.ts
@@ -51,7 +51,7 @@ const requestUpdatedPromptFunction = async (
         originalModelProvider.inputSchema,
         null,
         2,
-      )}\n\nDo not add any assistant messages.`,
+      )}`,
     },
     {
       role: "user",
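
Below is a minimal standalone sketch of the polling rule this patch introduces, assuming the stats shape returned by promptVariants.router.ts. The type and helper names (VariantStatsData, statsRefetchInterval) are illustrative only, not part of the codebase:

    interface VariantStatsData {
      scenarioCount: number;
      outputCount: number;
      awaitingCompletions: boolean; // set by the router when outputCount < scenarioCount
      awaitingEvals: boolean; // set when any eval has fewer results than scenarioCount * evals.length
    }

    // Returns the react-query refetchInterval in milliseconds: 5000 while any
    // LLM work (completions or evals) is outstanding, 0 (disabled) once done.
    function statsRefetchInterval(data: VariantStatsData): number {
      return data.awaitingCompletions || data.awaitingEvals ? 5000 : 0;
    }

    // A variant with 10 scenarios but only 7 completions keeps polling:
    console.log(
      statsRefetchInterval({
        scenarioCount: 10,
        outputCount: 7,
        awaitingCompletions: true,
        awaitingEvals: false,
      }),
    ); // => 5000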
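And a sketch of the request shape the updated "Convert to function call" instructions aim for, posted directly to OpenAI's /v1/chat/completions REST endpoint. The function name log_extracted_sentiment and the parameters layout follow Example 1 above; the messages and the sentiment property are assumed for illustration, since the diff elides those lines:

    async function main() {
      const res = await fetch("https://api.openai.com/v1/chat/completions", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${process.env.OPENAI_API_KEY ?? ""}`,
        },
        body: JSON.stringify({
          model: "gpt-4",
          messages: [
            // Assumed prompt content; the real Example 1 messages are not in the diff.
            { role: "system", content: "Extract the sentiment of the user's message." },
            { role: "user", content: "I love this product!" },
          ],
          functions: [
            {
              name: "log_extracted_sentiment",
              parameters: {
                type: "object", // parameters must always be an object with a properties key
                properties: {
                  // "sentiment" is an assumed property name for illustration
                  sentiment: { type: "string", enum: ["positive", "neutral", "negative"] },
                },
              },
            },
          ],
          // Forcing function_call makes the model reply with structured arguments
          // rather than free-form text.
          function_call: { name: "log_extracted_sentiment" },
        }),
      });

      const json = await res.json();
      // The structured output arrives as a JSON string in
      // choices[0].message.function_call.arguments, e.g. {"sentiment":"positive"}.
      console.log(json.choices?.[0]?.message?.function_call?.arguments);
    }

    main().catch(console.error);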