Improve refinement (#69)

* Format construction function on return

* Add more refinement examples

* Treat 503 like 429

* Define prompt as object

* Fix prettier
Author: arcticfly
Date: 2023-07-20 13:05:27 -07:00
Committed by: GitHub
Parent: 55c077d604
Commit: 86dc36a656
4 changed files with 184 additions and 14 deletions

View File

@@ -7,14 +7,123 @@ export const refineOptions: Record<
   { description: string; instructions: string }
 > = {
   "Add chain of thought": {
-    description: "Asks the model to think about its answer before it gives it to you.",
+    description: "Asking the model to plan its answer can increase accuracy.",
     instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message.
-Add chain of thought to the original prompt.`,
+This is what a prompt looks like before adding chain of thought:
+prompt = {
+  model: "gpt-4",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: \`Evaluate sentiment.\`,
+    },
+    {
+      role: "user",
+      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
+    },
+  ],
+};
+This is what one looks like after adding chain of thought:
+prompt = {
+  model: "gpt-4",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: \`Evaluate sentiment.\`,
+    },
+    {
+      role: "user",
+      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`,
+    },
+  ],
+};
+Here's another example:
+Before:
+prompt = {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Title: \${scenario.title}
+Body: \${scenario.body}
+Need: \${scenario.need}
+Rate likelihood on 1-3 scale.\`,
+    },
+  ],
+  temperature: 0,
+  functions: [
+    {
+      name: "score_post",
+      parameters: {
+        type: "object",
+        properties: {
+          score: {
+            type: "number",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "score_post",
+  },
+};
+After:
+prompt = {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Title: \${scenario.title}
+Body: \${scenario.body}
+Need: \${scenario.need}
+Rate likelihood on 1-3 scale. Provide an explanation, but always provide a score afterward.\`,
+    },
+  ],
+  temperature: 0,
+  functions: [
+    {
+      name: "score_post",
+      parameters: {
+        type: "object",
+        properties: {
+          explanation: {
+            type: "string",
+          },
+          score: {
+            type: "number",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "score_post",
+  },
+};
+Add chain of thought to the original prompt.`,
   },
   "Convert to function call": {
-    description: "Converts the prompt to a function call.",
-    instructions: `Function calls are a specific way for an LLM to return output. This is what a prompt looks like without using function calls:
+    description: "Use function calls to get output from the model in a more structured way.",
+    instructions: `OpenAI functions are a specialized way for an LLM to return output.
+This is what a prompt looks like before adding a function:
 prompt = {
   model: "gpt-4",
@@ -31,10 +140,10 @@ export const refineOptions: Record<
   ],
 };
-This is what one looks like using function calls:
+This is what one looks like after adding a function:
 prompt = {
-  model: "gpt-3.5-turbo-0613",
+  model: "gpt-4",
   stream: true,
   messages: [
     {
@@ -63,8 +172,66 @@ export const refineOptions: Record<
   function_call: {
     name: "extract_sentiment",
   },
 };
+Here's another example of adding a function:
+Before:
+prompt = {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Here is the title and body of a reddit post I am interested in:
+title: \${scenario.title}
+body: \${scenario.body}
+On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1.
+Need: \${scenario.need}
+Answer one integer between 1 and 3.\`,
+    },
+  ],
+  temperature: 0,
+};
+After:
+prompt = {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Title: \${scenario.title}
+Body: \${scenario.body}
+Need: \${scenario.need}
+Rate likelihood on 1-3 scale.\`,
+    },
+  ],
+  temperature: 0,
+  functions: [
+    {
+      name: "score_post",
+      parameters: {
+        type: "object",
+        properties: {
+          score: {
+            type: "number",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "score_post",
+  },
+};
-Add a function call that takes one or more nested parameters.`,
+Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
   },
 };
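
Note: the diff above only defines the refinement instructions shown to the model. As a rough sketch of how a prompt converted to a function call is actually consumed — assuming the v4 openai Node SDK that the imports elsewhere in this commit point to, with placeholder message content — forcing function_call makes the model reply with JSON arguments instead of free text:

import OpenAI from "openai";

const openai = new OpenAI();

// Sketch: invoke a prompt that uses the legacy `functions` API and read the
// structured arguments back out of the response.
const completion = await openai.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Rate likelihood on 1-3 scale." }],
  temperature: 0,
  functions: [
    {
      name: "score_post",
      parameters: {
        type: "object",
        properties: { score: { type: "number" } },
      },
    },
  ],
  function_call: { name: "score_post" },
});

// With function_call forced, the arguments arrive as a JSON string.
const rawArgs = completion.choices[0]?.message.function_call?.arguments ?? "{}";
const { score } = JSON.parse(rawArgs) as { score?: number };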

View File

@@ -33,7 +33,10 @@ const getCompletionWithRetries = async (
       payload as unknown as CompletionCreateParams,
       channel,
     );
-    if (modelResponse.statusCode !== 429 || i === MAX_AUTO_RETRIES - 1) {
+    if (
+      (modelResponse.statusCode !== 429 && modelResponse.statusCode !== 503) ||
+      i === MAX_AUTO_RETRIES - 1
+    ) {
       return modelResponse;
     }
     const delay = calculateDelay(i);
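
The calculateDelay helper is not shown in this diff. A plausible shape for it — hypothetical, assuming standard exponential backoff with jitter, the usual pairing for retrying 429/503 responses:

// Hypothetical sketch of calculateDelay: exponential backoff plus jitter,
// so concurrent requests don't all retry at the same instant.
const BASE_DELAY_MS = 1_000;

function calculateDelay(retryIndex: number): number {
  const exponential = BASE_DELAY_MS * 2 ** retryIndex; // 1s, 2s, 4s, ...
  const jitter = Math.random() * BASE_DELAY_MS;
  return exponential + jitter;
}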

View File

@@ -10,7 +10,7 @@ export async function constructPrompt(
 ): Promise<JSONSerializable> {
   const code = `
     const scenario = ${JSON.stringify(scenario ?? {}, null, 2)};
-    let prompt
+    let prompt = {};
     ${variant.constructFn}
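
Initializing prompt to an empty object matters because variant.constructFn is model-generated code spliced in after the declaration. A minimal illustration (hypothetical values) of the failure mode this avoids:

// Why `let prompt = {};` is safer than a bare `let prompt` when the
// model-generated constructor is appended after the declaration:
let bare: { model?: string } | undefined;
// bare.model = "gpt-4"; // TypeError: cannot set properties of undefined

let prompt: { model?: string } = {}; // the form this commit switches to
prompt.model = "gpt-4"; // safe even if the constructor only mutates fields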

View File

@@ -6,6 +6,7 @@ import { openai } from "./openai";
 import { getApiShapeForModel } from "./getTypesForModel";
 import { isObject } from "lodash-es";
 import { type CompletionCreateParams } from "openai/resources/chat/completions";
+import formatPromptConstructor from "~/utils/formatPromptConstructor";
 
 const isolate = new ivm.Isolate({ memoryLimit: 128 });
@@ -65,9 +66,8 @@ const requestUpdatedPromptFunction = async (
     });
   }
   messages.push({
-    role: "user",
-    content:
-      "The prompt variable has already been declared, so do not declare it again. Rewrite the entire prompt constructor function.",
+    role: "system",
+    content: "The prompt variable has already been declared, so do not declare it again.",
   });
   const completion = await openai.chat.completions.create({
     model: "gpt-4",
@@ -111,7 +111,7 @@ const requestUpdatedPromptFunction = async (
       const args = await contructPromptFunctionArgs.copy(); // Get the actual value from the isolate
       if (args && isObject(args) && "new_prompt_function" in args) {
-        newContructionFn = args.new_prompt_function as string;
+        newContructionFn = await formatPromptConstructor(args.new_prompt_function as string);
         break;
       }
     } catch (e) {
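
formatPromptConstructor itself is not included in this commit. Given the "Format construction function on return" note in the message, a plausible sketch — assumed, not confirmed by the diff — is a prettier pass over the model-generated constructor before it is stored:

// ~/utils/formatPromptConstructor.ts — hypothetical sketch. Assumes the
// helper just normalizes the generated code with prettier so successive
// refinements diff cleanly against the previous version.
import prettier from "prettier";

export default async function formatPromptConstructor(code: string): Promise<string> {
  return prettier.format(code, { parser: "typescript" });
}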