Define refinement actions in the model providers (#87)
* Add descriptions of fields in the Llama 2 input schema
* Let GPT-4 know when the provider stays the same
* Allow refetching in the event of any errors
* Define refinement actions in model providers
* Fix prettier
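For reviewers, a minimal sketch of how these refinement actions are meant to be looked up and applied, based on the `RefinementAction` type and the `RefinePromptModal` changes below; the helper names (`actionsFor`, `instructionsFor`) are illustrative and not part of the commit:

import frontendModelProviders from "~/modelProviders/frontendModelProviders";
import { type RefinementAction, type SupportedProvider } from "~/modelProviders/types";

// Each frontend model provider may expose a map of refinement actions, keyed by the
// label shown in the UI; providers without any fall back to an empty object.
const actionsFor = (provider: SupportedProvider): Record<string, RefinementAction> =>
  frontendModelProviders[provider].refinementActions || {};

// Resolve the canned instructions for a clicked action label (hypothetical helper).
const instructionsFor = (provider: SupportedProvider, label: string): string | undefined =>
  actionsFor(provider)[label]?.instructions;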
@@ -81,10 +81,21 @@ export default function OutputCell({
       </Center>
     );
 
-  if (!cell && !fetchingOutput) return <Text color="gray.500">Error retrieving output</Text>;
+  if (!cell && !fetchingOutput)
+    return (
+      <VStack>
+        <CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
+        <Text color="gray.500">Error retrieving output</Text>
+      </VStack>
+    );
 
   if (cell && cell.errorMessage) {
-    return <ErrorHandler cell={cell} refetchOutput={hardRefetch} />;
+    return (
+      <VStack>
+        <CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
+        <ErrorHandler cell={cell} refetchOutput={hardRefetch} />
+      </VStack>
+    );
   }
 
   const normalizedOutput = modelOutput
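This hunk wraps both error branches in a VStack that still renders CellOptions, so the refetch control stays available when output is missing or errored. A rough, illustrative sketch of the kind of control those props drive (the component and icon choices here are assumptions, not code from this commit):

import { Button, HStack, Icon } from "@chakra-ui/react";
import { BsArrowClockwise } from "react-icons/bs";

// Illustrative retry control: exposes the refetch callback next to an error message.
const RetryControl = ({
  refetchingOutput,
  refetchOutput,
}: {
  refetchingOutput: boolean;
  refetchOutput: () => void;
}) => (
  <HStack>
    <Button
      size="xs"
      isLoading={refetchingOutput}
      onClick={refetchOutput}
      leftIcon={<Icon as={BsArrowClockwise} />}
    >
      Refetch
    </Button>
  </HStack>
);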
@@ -1,7 +1,8 @@
 import { HStack, Icon, Heading, Text, VStack, GridItem } from "@chakra-ui/react";
 import { type IconType } from "react-icons";
+import { BsStars } from "react-icons/bs";
 
-export const RefineOption = ({
+export const RefineAction = ({
   label,
   icon,
   desciption,
@@ -10,7 +11,7 @@ export const RefineOption = ({
   loading,
 }: {
   label: string;
-  icon: IconType;
+  icon?: IconType;
   desciption: string;
   activeLabel: string | undefined;
   onClick: (label: string) => void;
@@ -44,7 +45,7 @@ export const RefineOption = ({
       opacity={loading ? 0.5 : 1}
     >
       <HStack cursor="pointer" spacing={6} fontSize="sm" fontWeight="medium" color="gray.500">
-        <Icon as={icon} boxSize={12} />
+        <Icon as={icon || BsStars} boxSize={12} />
         <Heading size="md" fontFamily="inconsolata, monospace">
           {label}
         </Heading>
@@ -21,10 +21,10 @@ import { type PromptVariant } from "@prisma/client";
 import { useState } from "react";
 import CompareFunctions from "./CompareFunctions";
 import { CustomInstructionsInput } from "./CustomInstructionsInput";
-import { type RefineOptionInfo, refineOptions } from "./refineOptions";
-import { RefineOption } from "./RefineOption";
+import { RefineAction } from "./RefineAction";
 import { isObject, isString } from "lodash-es";
-import { type SupportedProvider } from "~/modelProviders/types";
+import { type RefinementAction, type SupportedProvider } from "~/modelProviders/types";
+import frontendModelProviders from "~/modelProviders/frontendModelProviders";
 
 export const RefinePromptModal = ({
   variant,
@@ -35,13 +35,14 @@ export const RefinePromptModal = ({
 }) => {
   const utils = api.useContext();
 
-  const providerRefineOptions = refineOptions[variant.modelProvider as SupportedProvider];
+  const refinementActions =
+    frontendModelProviders[variant.modelProvider as SupportedProvider].refinementActions || {};
 
   const { mutateAsync: getModifiedPromptMutateAsync, data: refinedPromptFn } =
     api.promptVariants.getModifiedPromptFn.useMutation();
   const [instructions, setInstructions] = useState<string>("");
 
-  const [activeRefineOptionLabel, setActiveRefineOptionLabel] = useState<string | undefined>(
+  const [activeRefineActionLabel, setActiveRefineActionLabel] = useState<string | undefined>(
     undefined,
   );
 
@@ -49,15 +50,15 @@ export const RefinePromptModal = ({
     async (label?: string) => {
       if (!variant.experimentId) return;
       const updatedInstructions = label
-        ? (providerRefineOptions[label] as RefineOptionInfo).instructions
+        ? (refinementActions[label] as RefinementAction).instructions
         : instructions;
-      setActiveRefineOptionLabel(label);
+      setActiveRefineActionLabel(label);
       await getModifiedPromptMutateAsync({
         id: variant.id,
         instructions: updatedInstructions,
       });
     },
-    [getModifiedPromptMutateAsync, onClose, variant, instructions, setActiveRefineOptionLabel],
+    [getModifiedPromptMutateAsync, onClose, variant, instructions, setActiveRefineActionLabel],
   );
 
   const replaceVariantMutation = api.promptVariants.replaceVariant.useMutation();
@@ -95,18 +96,18 @@ export const RefinePromptModal = ({
             <ModalBody maxW="unset">
               <VStack spacing={8}>
                 <VStack spacing={4}>
-                  {Object.keys(providerRefineOptions).length && (
+                  {Object.keys(refinementActions).length && (
                     <>
                       <SimpleGrid columns={{ base: 1, md: 2 }} spacing={8}>
-                        {Object.keys(providerRefineOptions).map((label) => (
-                          <RefineOption
+                        {Object.keys(refinementActions).map((label) => (
+                          <RefineAction
                             key={label}
                             label={label}
                             // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
-                            icon={providerRefineOptions[label]!.icon}
+                            icon={refinementActions[label]!.icon}
                             // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
-                            desciption={providerRefineOptions[label]!.description}
-                            activeLabel={activeRefineOptionLabel}
+                            desciption={refinementActions[label]!.description}
+                            activeLabel={activeRefineActionLabel}
                             onClick={getModifiedPromptFn}
                             loading={modificationInProgress}
                           />
@@ -1,287 +0,0 @@
-// Super hacky, but we'll redo the organization when we have more models
-
-import { type SupportedProvider } from "~/modelProviders/types";
-import { VscJson } from "react-icons/vsc";
-import { TfiThought } from "react-icons/tfi";
-import { type IconType } from "react-icons";
-
-export type RefineOptionInfo = { icon: IconType; description: string; instructions: string };
-
-export const refineOptions: Record<SupportedProvider, { [key: string]: RefineOptionInfo }> = {
-  "openai/ChatCompletion": {
-    "Add chain of thought": {
-      icon: VscJson,
-      description: "Asking the model to plan its answer can increase accuracy.",
-      instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message.
-
-This is what a prompt looks like before adding chain of thought:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-4",
-  stream: true,
-  messages: [
-    {
-      role: "system",
-      content: \`Evaluate sentiment.\`,
-    },
-    {
-      role: "user",
-      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
-    },
-  ],
-});
-
-This is what one looks like after adding chain of thought:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-4",
-  stream: true,
-  messages: [
-    {
-      role: "system",
-      content: \`Evaluate sentiment.\`,
-    },
-    {
-      role: "user",
-      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`,
-    },
-  ],
-});
-
-Here's another example:
-
-Before:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-3.5-turbo",
-  messages: [
-    {
-      role: "user",
-      content: \`Title: \${scenario.title}
-Body: \${scenario.body}
-
-Need: \${scenario.need}
-
-Rate likelihood on 1-3 scale.\`,
-    },
-  ],
-  temperature: 0,
-  functions: [
-    {
-      name: "score_post",
-      parameters: {
-        type: "object",
-        properties: {
-          score: {
-            type: "number",
-          },
-        },
-      },
-    },
-  ],
-  function_call: {
-    name: "score_post",
-  },
-});
-
-After:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-3.5-turbo",
-  messages: [
-    {
-      role: "user",
-      content: \`Title: \${scenario.title}
-Body: \${scenario.body}
-
-Need: \${scenario.need}
-
-Rate likelihood on 1-3 scale. Provide an explanation, but always provide a score afterward.\`,
-    },
-  ],
-  temperature: 0,
-  functions: [
-    {
-      name: "score_post",
-      parameters: {
-        type: "object",
-        properties: {
-          explanation: {
-            type: "string",
-          }
-          score: {
-            type: "number",
-          },
-        },
-      },
-    },
-  ],
-  function_call: {
-    name: "score_post",
-  },
-});
-
-Add chain of thought to the original prompt.`,
-    },
-    "Convert to function call": {
-      icon: TfiThought,
-      description: "Use function calls to get output from the model in a more structured way.",
-      instructions: `OpenAI functions are a specialized way for an LLM to return output.
-
-This is what a prompt looks like before adding a function:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-4",
-  stream: true,
-  messages: [
-    {
-      role: "system",
-      content: \`Evaluate sentiment.\`,
-    },
-    {
-      role: "user",
-      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
-    },
-  ],
-});
-
-This is what one looks like after adding a function:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-4",
-  stream: true,
-  messages: [
-    {
-      role: "system",
-      content: "Evaluate sentiment.",
-    },
-    {
-      role: "user",
-      content: scenario.user_message,
-    },
-  ],
-  functions: [
-    {
-      name: "extract_sentiment",
-      parameters: {
-        type: "object", // parameters must always be an object with a properties key
-        properties: { // properties key is required
-          sentiment: {
-            type: "string",
-            description: "one of positive/negative/neutral",
-          },
-        },
-      },
-    },
-  ],
-  function_call: {
-    name: "extract_sentiment",
-  },
-});
-
-Here's another example of adding a function:
-
-Before:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-3.5-turbo",
-  messages: [
-    {
-      role: "user",
-      content: \`Here is the title and body of a reddit post I am interested in:
-
-title: \${scenario.title}
-body: \${scenario.body}
-
-On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1.
-
-Need: \${scenario.need}
-
-Answer one integer between 1 and 3.\`,
-    },
-  ],
-  temperature: 0,
-});
-
-After:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-3.5-turbo",
-  messages: [
-    {
-      role: "user",
-      content: \`Title: \${scenario.title}
-Body: \${scenario.body}
-
-Need: \${scenario.need}
-
-Rate likelihood on 1-3 scale.\`,
-    },
-  ],
-  temperature: 0,
-  functions: [
-    {
-      name: "score_post",
-      parameters: {
-        type: "object",
-        properties: {
-          score: {
-            type: "number",
-          },
-        },
-      },
-    },
-  ],
-  function_call: {
-    name: "score_post",
-  },
-});
-
-Another example
-
-Before:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-3.5-turbo",
-  stream: true,
-  messages: [
-    {
-      role: "system",
-      content: \`Write 'Start experimenting!' in \${scenario.language}\`,
-    },
-  ],
-});
-
-After:
-
-definePrompt("openai/ChatCompletion", {
-  model: "gpt-3.5-turbo",
-  messages: [
-    {
-      role: "system",
-      content: \`Write 'Start experimenting!' in \${scenario.language}\`,
-    },
-  ],
-  functions: [
-    {
-      name: "write_in_language",
-      parameters: {
-        type: "object",
-        properties: {
-          text: {
-            type: "string",
-          },
-        },
-      },
-    },
-  ],
-  function_call: {
-    name: "write_in_language",
-  },
-});
-
-Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
-    },
-  },
-  "replicate/llama2": {},
-};
@@ -2,6 +2,7 @@ import { type JsonValue } from "type-fest";
 import { type SupportedModel } from ".";
 import { type FrontendModelProvider } from "../types";
 import { type ChatCompletion } from "openai/resources/chat";
+import { refinementActions } from "./refinementActions";
 
 const frontendModelProvider: FrontendModelProvider<SupportedModel, ChatCompletion> = {
   name: "OpenAI ChatCompletion",
@@ -45,6 +46,8 @@ const frontendModelProvider: FrontendModelProvider<SupportedModel, ChatCompletio
     },
   },
 
+  refinementActions,
+
   normalizeOutput: (output) => {
     const message = output.choices[0]?.message;
     if (!message)
src/modelProviders/openai-ChatCompletion/refinementActions.ts (new file, 279 lines)
@@ -0,0 +1,279 @@
+import { TfiThought } from "react-icons/tfi";
+import { type RefinementAction } from "../types";
+import { VscJson } from "react-icons/vsc";
+
+export const refinementActions: Record<string, RefinementAction> = {
+  "Add chain of thought": {
+    icon: VscJson,
+    description: "Asking the model to plan its answer can increase accuracy.",
+    instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message.
+
+This is what a prompt looks like before adding chain of thought:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-4",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: \`Evaluate sentiment.\`,
+    },
+    {
+      role: "user",
+      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
+    },
+  ],
+});
+
+This is what one looks like after adding chain of thought:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-4",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: \`Evaluate sentiment.\`,
+    },
+    {
+      role: "user",
+      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`,
+    },
+  ],
+});
+
+Here's another example:
+
+Before:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Title: \${scenario.title}
+Body: \${scenario.body}
+
+Need: \${scenario.need}
+
+Rate likelihood on 1-3 scale.\`,
+    },
+  ],
+  temperature: 0,
+  functions: [
+    {
+      name: "score_post",
+      parameters: {
+        type: "object",
+        properties: {
+          score: {
+            type: "number",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "score_post",
+  },
+});
+
+After:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Title: \${scenario.title}
+Body: \${scenario.body}
+
+Need: \${scenario.need}
+
+Rate likelihood on 1-3 scale. Provide an explanation, but always provide a score afterward.\`,
+    },
+  ],
+  temperature: 0,
+  functions: [
+    {
+      name: "score_post",
+      parameters: {
+        type: "object",
+        properties: {
+          explanation: {
+            type: "string",
+          }
+          score: {
+            type: "number",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "score_post",
+  },
+});
+
+Add chain of thought to the original prompt.`,
+  },
+  "Convert to function call": {
+    icon: TfiThought,
+    description: "Use function calls to get output from the model in a more structured way.",
+    instructions: `OpenAI functions are a specialized way for an LLM to return output.
+
+This is what a prompt looks like before adding a function:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-4",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: \`Evaluate sentiment.\`,
+    },
+    {
+      role: "user",
+      content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
+    },
+  ],
+});
+
+This is what one looks like after adding a function:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-4",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: "Evaluate sentiment.",
+    },
+    {
+      role: "user",
+      content: scenario.user_message,
+    },
+  ],
+  functions: [
+    {
+      name: "extract_sentiment",
+      parameters: {
+        type: "object", // parameters must always be an object with a properties key
+        properties: { // properties key is required
+          sentiment: {
+            type: "string",
+            description: "one of positive/negative/neutral",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "extract_sentiment",
+  },
+});
+
+Here's another example of adding a function:
+
+Before:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Here is the title and body of a reddit post I am interested in:
+
+title: \${scenario.title}
+body: \${scenario.body}
+
+On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1.
+
+Need: \${scenario.need}
+
+Answer one integer between 1 and 3.\`,
+    },
+  ],
+  temperature: 0,
+});
+
+After:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "user",
+      content: \`Title: \${scenario.title}
+Body: \${scenario.body}
+
+Need: \${scenario.need}
+
+Rate likelihood on 1-3 scale.\`,
+    },
+  ],
+  temperature: 0,
+  functions: [
+    {
+      name: "score_post",
+      parameters: {
+        type: "object",
+        properties: {
+          score: {
+            type: "number",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "score_post",
+  },
+});
+
+Another example
+
+Before:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-3.5-turbo",
+  stream: true,
+  messages: [
+    {
+      role: "system",
+      content: \`Write 'Start experimenting!' in \${scenario.language}\`,
+    },
+  ],
+});
+
+After:
+
+definePrompt("openai/ChatCompletion", {
+  model: "gpt-3.5-turbo",
+  messages: [
+    {
+      role: "system",
+      content: \`Write 'Start experimenting!' in \${scenario.language}\`,
+    },
+  ],
+  functions: [
+    {
+      name: "write_in_language",
+      parameters: {
+        type: "object",
+        properties: {
+          text: {
+            type: "string",
+          },
+        },
+      },
+    },
+  ],
+  function_call: {
+    name: "write_in_language",
+  },
+});
+
+Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
+  },
+};
@@ -1,5 +1,6 @@
 import { type SupportedModel, type ReplicateLlama2Output } from ".";
 import { type FrontendModelProvider } from "../types";
+import { refinementActions } from "./refinementActions";
 
 const frontendModelProvider: FrontendModelProvider<SupportedModel, ReplicateLlama2Output> = {
   name: "Replicate Llama2",
@@ -31,6 +32,8 @@ const frontendModelProvider: FrontendModelProvider<SupportedModel, ReplicateLlam
     },
   },
 
+  refinementActions,
+
   normalizeOutput: (output) => {
     return {
       type: "text",
@@ -38,26 +38,42 @@ const modelProvider: ReplicateLlama2Provider = {
         type: "string",
         enum: supportedModels as unknown as string[],
       },
+      system_prompt: {
+        type: "string",
+        description:
+          "System prompt to send to Llama v2. This is prepended to the prompt and helps guide system behavior.",
+      },
       prompt: {
         type: "string",
+        description: "Prompt to send to Llama v2.",
       },
       stream: {
         type: "boolean",
+        description: "Whether to stream output from Llama v2.",
       },
-      max_length: {
+      max_new_tokens: {
         type: "number",
+        description:
+          "Maximum number of tokens to generate. A word is generally 2-3 tokens (minimum: 1)",
       },
       temperature: {
        type: "number",
+        description:
+          "Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value. (minimum: 0.01; maximum: 5)",
       },
       top_p: {
         type: "number",
+        description:
+          "When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens (minimum: 0.01; maximum: 1)",
       },
       repetition_penalty: {
         type: "number",
+        description:
+          "Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it. (minimum: 0.01; maximum: 5)",
       },
       debug: {
         type: "boolean",
+        description: "provide debugging output in logs",
       },
     },
     required: ["model", "prompt"],
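With the descriptions above, the Llama 2 input schema is now self-documenting. A hedged example of an input object that conforms to it (values are illustrative, and the exact `model` string depends on the provider's `supportedModels` list):

// Illustrative replicate/llama2 input; only `model` and `prompt` are required.
const exampleLlama2Input = {
  model: "7b-chat", // placeholder; must be one of supportedModels
  system_prompt: "You are a concise assistant.", // prepended to the prompt
  prompt: "Summarize the following paragraph in one sentence: ...",
  stream: false,
  max_new_tokens: 128, // a word is generally 2-3 tokens
  temperature: 0.75, // 0 is deterministic, greater than 1 is random
  top_p: 0.9, // sample from the top p percentage of most likely tokens
  repetition_penalty: 1.1, // greater than 1 discourages repetition
  debug: false,
};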
src/modelProviders/replicate-llama2/refinementActions.ts (new file, 3 lines)
@@ -0,0 +1,3 @@
+import { type RefinementAction } from "../types";
+
+export const refinementActions: Record<string, RefinementAction> = {};
@@ -1,4 +1,5 @@
 import { type JSONSchema4 } from "json-schema";
+import { type IconType } from "react-icons";
 import { type JsonValue } from "type-fest";
 import { z } from "zod";
 
@@ -23,9 +24,12 @@ export const ZodModel = z.object({
 
 export type Model = z.infer<typeof ZodModel>;
 
+export type RefinementAction = { icon?: IconType; description: string; instructions: string };
+
 export type FrontendModelProvider<SupportedModels extends string, OutputSchema> = {
   name: string;
   models: Record<SupportedModels, Model>;
+  refinementActions?: Record<string, RefinementAction>;
 
   normalizeOutput: (output: OutputSchema) => NormalizedOutput;
 };
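Since `refinementActions` is optional on `FrontendModelProvider`, a provider opts in by exporting its own map. A minimal sketch under the new types (the action label and instruction text below are placeholders, not part of this commit):

import { type RefinementAction } from "~/modelProviders/types";

// Hypothetical provider-local actions map, keyed by the label shown in the modal.
export const exampleRefinementActions: Record<string, RefinementAction> = {
  "Tighten the system prompt": {
    // icon is optional; RefineAction falls back to BsStars when it is omitted.
    description: "Ask the model to rewrite the system prompt more concisely.",
    instructions: "Rewrite the system prompt so it is shorter while preserving every constraint.",
  },
};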
@@ -74,6 +74,11 @@ const requestUpdatedPromptFunction = async (
         2,
       )}`,
     });
+    } else {
+      messages.push({
+        role: "user",
+        content: `The provider is the same as the old provider: ${originalModel.provider}`,
+      });
     }
   }
   if (instructions) {
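The new `else` branch tells GPT-4 explicitly that the provider is unchanged, so it does not assume a migration. A sketch of the surrounding branch as implied by the hunk (the `newModel` comparison and the changed-provider message are assumptions about code outside this hunk):

// Assumed surrounding logic: announce either a provider switch or that the provider
// is staying the same before asking for the modified prompt function.
if (newModel.provider !== originalModel.provider) {
  messages.push({
    role: "user",
    content: `The provider changed from ${originalModel.provider} to ${newModel.provider}.`,
  });
} else {
  messages.push({
    role: "user",
    content: `The provider is the same as the old provider: ${originalModel.provider}`,
  });
}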