Await completions (#153)

* Continue polling stats while waiting for completions to finish

* Clarify "Convert to function call" instructions
Author: arcticfly
Date: 2023-08-14 13:03:48 -07:00
Committed by: GitHub
Parent: 99f305483b
Commit: e649f42c9c

4 changed files with 30 additions and 20 deletions


@@ -21,14 +21,18 @@ export default function VariantStats(props: { variant: PromptVariant }) {
         outputTokens: 0,
         scenarioCount: 0,
         outputCount: 0,
+        awaitingCompletions: false,
         awaitingEvals: false,
       },
       refetchInterval,
     },
   );
 
-  // Poll every two seconds while we are waiting for LLM retrievals to finish
-  useEffect(() => setRefetchInterval(data.awaitingEvals ? 5000 : 0), [data.awaitingEvals]);
+  // Poll every five seconds while we are waiting for LLM retrievals to finish
+  useEffect(
+    () => setRefetchInterval(data.awaitingCompletions || data.awaitingEvals ? 5000 : 0),
+    [data.awaitingCompletions, data.awaitingEvals],
+  );
 
   const [passColor, neutralColor, failColor] = useToken("colors", [
     "green.500",


@@ -120,9 +120,9 @@ export const refinementActions: Record<string, RefinementAction> = {
"Convert to function call": { "Convert to function call": {
icon: TfiThought, icon: TfiThought,
description: "Use function calls to get output from the model in a more structured way.", description: "Use function calls to get output from the model in a more structured way.",
instructions: `OpenAI functions are a specialized way for an LLM to return output. instructions: `OpenAI functions are a specialized way for an LLM to return its final output.
This is what a prompt looks like before adding a function: Example 1 before:
definePrompt("openai/ChatCompletion", { definePrompt("openai/ChatCompletion", {
model: "gpt-4", model: "gpt-4",
@@ -139,7 +139,7 @@ export const refinementActions: Record<string, RefinementAction> = {
   ],
 });
 
-This is what one looks like after adding a function:
+Example 1 after:
 
 definePrompt("openai/ChatCompletion", {
   model: "gpt-4",
@@ -156,7 +156,7 @@ export const refinementActions: Record<string, RefinementAction> = {
   ],
   functions: [
     {
-      name: "extract_sentiment",
+      name: "log_extracted_sentiment",
       parameters: {
         type: "object", // parameters must always be an object with a properties key
         properties: { // properties key is required
@@ -169,13 +169,13 @@ export const refinementActions: Record<string, RefinementAction> = {
     },
   ],
   function_call: {
-    name: "extract_sentiment",
+    name: "log_extracted_sentiment",
   },
 });
 
-Here's another example of adding a function:
-Before:
+=========
+Example 2 before:
 
 definePrompt("openai/ChatCompletion", {
   model: "gpt-3.5-turbo",
@@ -197,7 +197,7 @@ export const refinementActions: Record<string, RefinementAction> = {
   temperature: 0,
 });
 
-After:
+Example 2 after:
 
 definePrompt("openai/ChatCompletion", {
   model: "gpt-3.5-turbo",
@@ -215,7 +215,7 @@ export const refinementActions: Record<string, RefinementAction> = {
   temperature: 0,
   functions: [
     {
-      name: "score_post",
+      name: "log_post_score",
       parameters: {
         type: "object",
         properties: {
@@ -227,13 +227,13 @@ export const refinementActions: Record<string, RefinementAction> = {
     },
   ],
   function_call: {
-    name: "score_post",
+    name: "log_post_score",
   },
 });
 
-Another example
-Before:
+=========
+Example 3 before:
 
 definePrompt("openai/ChatCompletion", {
   model: "gpt-3.5-turbo",
@@ -246,7 +246,7 @@ export const refinementActions: Record<string, RefinementAction> = {
   ],
 });
 
-After:
+Example 3 after:
 
 definePrompt("openai/ChatCompletion", {
   model: "gpt-3.5-turbo",
@@ -258,22 +258,25 @@ export const refinementActions: Record<string, RefinementAction> = {
   ],
   functions: [
     {
-      name: "write_in_language",
+      name: "log_translated_text",
       parameters: {
         type: "object",
         properties: {
-          text: {
+          translated_text: {
             type: "string",
+            description: "The text, written in the language specified in the prompt",
           },
         },
       },
     },
   ],
   function_call: {
-    name: "write_in_language",
+    name: "log_translated_text",
   },
 });
+=========
 
 Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
   },
 };
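
A note on what these instructions teach: when `function_call` pins a specific function, as in all three examples, the model returns its output through `function_call.arguments` (a JSON-encoded string) rather than through `content`. A rough sketch of the resulting assistant message under the legacy OpenAI `functions` API; the `sentiment` property value is an illustrative assumption, not something specified in this diff:

// Approximate shape of an assistant message when a specific
// function_call is forced (legacy OpenAI `functions` API).
const assistantMessage = {
  role: "assistant",
  content: null, // structured output arrives via function_call, not content
  function_call: {
    name: "log_extracted_sentiment",
    // Arguments come back as a JSON string the caller must parse.
    arguments: '{"sentiment": "positive"}',
  },
};

const args = JSON.parse(assistantMessage.function_call.arguments) as {
  sentiment: string; // hypothetical property; the real schema is prompt-specific
};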


@@ -131,6 +131,8 @@ export const promptVariantsRouter = createTRPCRouter({
       const inputTokens = overallTokens._sum?.inputTokens ?? 0;
       const outputTokens = overallTokens._sum?.outputTokens ?? 0;
 
+      const awaitingCompletions = outputCount < scenarioCount;
+
       const awaitingEvals = !!evalResults.find(
         (result) => result.totalCount < scenarioCount * evals.length,
       );
@@ -142,6 +144,7 @@ export const promptVariantsRouter = createTRPCRouter({
         overallCost: overallTokens._sum?.cost ?? 0,
         scenarioCount,
         outputCount,
+        awaitingCompletions,
         awaitingEvals,
       };
     }),
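
A worked example of the two flags this endpoint now returns, using made-up counts in place of the Prisma aggregations above:

// Hypothetical values standing in for the real aggregations.
const scenarioCount = 10; // total test scenarios for the variant
const outputCount = 7; // scenarios that already have a completion
const evals = [{ id: "a" }, { id: "b" }];
const evalResults = [{ totalCount: 14 }];

// 7 < 10, so three completions are still outstanding.
const awaitingCompletions = outputCount < scenarioCount;

// Evals finish only when every (scenario, eval) pair has a result:
// 14 < 10 * 2, so evals are still running as well.
const awaitingEvals = !!evalResults.find(
  (result) => result.totalCount < scenarioCount * evals.length,
);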


@@ -51,7 +51,7 @@ const requestUpdatedPromptFunction = async (
           originalModelProvider.inputSchema,
           null,
           2,
-        )}\n\nDo not add any assistant messages.`,
+        )}`,
       },
       {
         role: "user",