diff --git a/src/components/OutputsTable/OutputCell/OutputCell.tsx b/src/components/OutputsTable/OutputCell/OutputCell.tsx index a127c3e..5cf288b 100644 --- a/src/components/OutputsTable/OutputCell/OutputCell.tsx +++ b/src/components/OutputsTable/OutputCell/OutputCell.tsx @@ -81,10 +81,21 @@ export default function OutputCell({ ); - if (!cell && !fetchingOutput) return Error retrieving output; + if (!cell && !fetchingOutput) + return ( + + + Error retrieving output + + ); if (cell && cell.errorMessage) { - return ; + return ( + + + + + ); } const normalizedOutput = modelOutput diff --git a/src/components/RefinePromptModal/RefineOption.tsx b/src/components/RefinePromptModal/RefineAction.tsx similarity index 90% rename from src/components/RefinePromptModal/RefineOption.tsx rename to src/components/RefinePromptModal/RefineAction.tsx index f797ab7..d1e2e7d 100644 --- a/src/components/RefinePromptModal/RefineOption.tsx +++ b/src/components/RefinePromptModal/RefineAction.tsx @@ -1,7 +1,8 @@ import { HStack, Icon, Heading, Text, VStack, GridItem } from "@chakra-ui/react"; import { type IconType } from "react-icons"; +import { BsStars } from "react-icons/bs"; -export const RefineOption = ({ +export const RefineAction = ({ label, icon, desciption, @@ -10,7 +11,7 @@ export const RefineOption = ({ loading, }: { label: string; - icon: IconType; + icon?: IconType; desciption: string; activeLabel: string | undefined; onClick: (label: string) => void; @@ -44,7 +45,7 @@ export const RefineOption = ({ opacity={loading ? 0.5 : 1} > - + {label} diff --git a/src/components/RefinePromptModal/RefinePromptModal.tsx b/src/components/RefinePromptModal/RefinePromptModal.tsx index 93eb386..4104dd3 100644 --- a/src/components/RefinePromptModal/RefinePromptModal.tsx +++ b/src/components/RefinePromptModal/RefinePromptModal.tsx @@ -21,10 +21,10 @@ import { type PromptVariant } from "@prisma/client"; import { useState } from "react"; import CompareFunctions from "./CompareFunctions"; import { CustomInstructionsInput } from "./CustomInstructionsInput"; -import { type RefineOptionInfo, refineOptions } from "./refineOptions"; -import { RefineOption } from "./RefineOption"; +import { RefineAction } from "./RefineAction"; import { isObject, isString } from "lodash-es"; -import { type SupportedProvider } from "~/modelProviders/types"; +import { type RefinementAction, type SupportedProvider } from "~/modelProviders/types"; +import frontendModelProviders from "~/modelProviders/frontendModelProviders"; export const RefinePromptModal = ({ variant, @@ -35,13 +35,14 @@ export const RefinePromptModal = ({ }) => { const utils = api.useContext(); - const providerRefineOptions = refineOptions[variant.modelProvider as SupportedProvider]; + const refinementActions = + frontendModelProviders[variant.modelProvider as SupportedProvider].refinementActions || {}; const { mutateAsync: getModifiedPromptMutateAsync, data: refinedPromptFn } = api.promptVariants.getModifiedPromptFn.useMutation(); const [instructions, setInstructions] = useState(""); - const [activeRefineOptionLabel, setActiveRefineOptionLabel] = useState( + const [activeRefineActionLabel, setActiveRefineActionLabel] = useState( undefined, ); @@ -49,15 +50,15 @@ export const RefinePromptModal = ({ async (label?: string) => { if (!variant.experimentId) return; const updatedInstructions = label - ? (providerRefineOptions[label] as RefineOptionInfo).instructions + ? 
(refinementActions[label] as RefinementAction).instructions : instructions; - setActiveRefineOptionLabel(label); + setActiveRefineActionLabel(label); await getModifiedPromptMutateAsync({ id: variant.id, instructions: updatedInstructions, }); }, - [getModifiedPromptMutateAsync, onClose, variant, instructions, setActiveRefineOptionLabel], + [getModifiedPromptMutateAsync, onClose, variant, instructions, setActiveRefineActionLabel], ); const replaceVariantMutation = api.promptVariants.replaceVariant.useMutation(); @@ -95,18 +96,18 @@ export const RefinePromptModal = ({ - {Object.keys(providerRefineOptions).length && ( + {Object.keys(refinementActions).length && ( <> - {Object.keys(providerRefineOptions).map((label) => ( - ( + diff --git a/src/components/RefinePromptModal/refineOptions.ts b/src/components/RefinePromptModal/refineOptions.ts deleted file mode 100644 index 3151c31..0000000 --- a/src/components/RefinePromptModal/refineOptions.ts +++ /dev/null @@ -1,287 +0,0 @@ -// Super hacky, but we'll redo the organization when we have more models - -import { type SupportedProvider } from "~/modelProviders/types"; -import { VscJson } from "react-icons/vsc"; -import { TfiThought } from "react-icons/tfi"; -import { type IconType } from "react-icons"; - -export type RefineOptionInfo = { icon: IconType; description: string; instructions: string }; - -export const refineOptions: Record = { - "openai/ChatCompletion": { - "Add chain of thought": { - icon: VscJson, - description: "Asking the model to plan its answer can increase accuracy.", - instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message. - - This is what a prompt looks like before adding chain of thought: - - definePrompt("openai/ChatCompletion", { - model: "gpt-4", - stream: true, - messages: [ - { - role: "system", - content: \`Evaluate sentiment.\`, - }, - { - role: "user", - content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`, - }, - ], - }); - - This is what one looks like after adding chain of thought: - - definePrompt("openai/ChatCompletion", { - model: "gpt-4", - stream: true, - messages: [ - { - role: "system", - content: \`Evaluate sentiment.\`, - }, - { - role: "user", - content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`, - }, - ], - }); - - Here's another example: - - Before: - - definePrompt("openai/ChatCompletion", { - model: "gpt-3.5-turbo", - messages: [ - { - role: "user", - content: \`Title: \${scenario.title} - Body: \${scenario.body} - - Need: \${scenario.need} - - Rate likelihood on 1-3 scale.\`, - }, - ], - temperature: 0, - functions: [ - { - name: "score_post", - parameters: { - type: "object", - properties: { - score: { - type: "number", - }, - }, - }, - }, - ], - function_call: { - name: "score_post", - }, - }); - - After: - - definePrompt("openai/ChatCompletion", { - model: "gpt-3.5-turbo", - messages: [ - { - role: "user", - content: \`Title: \${scenario.title} - Body: \${scenario.body} - - Need: \${scenario.need} - - Rate likelihood on 1-3 scale. 
Provide an explanation, but always provide a score afterward.\`, - }, - ], - temperature: 0, - functions: [ - { - name: "score_post", - parameters: { - type: "object", - properties: { - explanation: { - type: "string", - } - score: { - type: "number", - }, - }, - }, - }, - ], - function_call: { - name: "score_post", - }, - }); - - Add chain of thought to the original prompt.`, - }, - "Convert to function call": { - icon: TfiThought, - description: "Use function calls to get output from the model in a more structured way.", - instructions: `OpenAI functions are a specialized way for an LLM to return output. - - This is what a prompt looks like before adding a function: - - definePrompt("openai/ChatCompletion", { - model: "gpt-4", - stream: true, - messages: [ - { - role: "system", - content: \`Evaluate sentiment.\`, - }, - { - role: "user", - content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`, - }, - ], - }); - - This is what one looks like after adding a function: - - definePrompt("openai/ChatCompletion", { - model: "gpt-4", - stream: true, - messages: [ - { - role: "system", - content: "Evaluate sentiment.", - }, - { - role: "user", - content: scenario.user_message, - }, - ], - functions: [ - { - name: "extract_sentiment", - parameters: { - type: "object", // parameters must always be an object with a properties key - properties: { // properties key is required - sentiment: { - type: "string", - description: "one of positive/negative/neutral", - }, - }, - }, - }, - ], - function_call: { - name: "extract_sentiment", - }, - }); - - Here's another example of adding a function: - - Before: - - definePrompt("openai/ChatCompletion", { - model: "gpt-3.5-turbo", - messages: [ - { - role: "user", - content: \`Here is the title and body of a reddit post I am interested in: - - title: \${scenario.title} - body: \${scenario.body} - - On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1. - - Need: \${scenario.need} - - Answer one integer between 1 and 3.\`, - }, - ], - temperature: 0, - }); - - After: - - definePrompt("openai/ChatCompletion", { - model: "gpt-3.5-turbo", - messages: [ - { - role: "user", - content: \`Title: \${scenario.title} - Body: \${scenario.body} - - Need: \${scenario.need} - - Rate likelihood on 1-3 scale.\`, - }, - ], - temperature: 0, - functions: [ - { - name: "score_post", - parameters: { - type: "object", - properties: { - score: { - type: "number", - }, - }, - }, - }, - ], - function_call: { - name: "score_post", - }, - }); - - Another example - - Before: - - definePrompt("openai/ChatCompletion", { - model: "gpt-3.5-turbo", - stream: true, - messages: [ - { - role: "system", - content: \`Write 'Start experimenting!' in \${scenario.language}\`, - }, - ], - }); - - After: - - definePrompt("openai/ChatCompletion", { - model: "gpt-3.5-turbo", - messages: [ - { - role: "system", - content: \`Write 'Start experimenting!' 
in \${scenario.language}\`, - }, - ], - functions: [ - { - name: "write_in_language", - parameters: { - type: "object", - properties: { - text: { - type: "string", - }, - }, - }, - }, - ], - function_call: { - name: "write_in_language", - }, - }); - - Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`, - }, - }, - "replicate/llama2": {}, -}; diff --git a/src/modelProviders/openai-ChatCompletion/frontend.ts b/src/modelProviders/openai-ChatCompletion/frontend.ts index 825c2c1..e8aa08d 100644 --- a/src/modelProviders/openai-ChatCompletion/frontend.ts +++ b/src/modelProviders/openai-ChatCompletion/frontend.ts @@ -2,6 +2,7 @@ import { type JsonValue } from "type-fest"; import { type SupportedModel } from "."; import { type FrontendModelProvider } from "../types"; import { type ChatCompletion } from "openai/resources/chat"; +import { refinementActions } from "./refinementActions"; const frontendModelProvider: FrontendModelProvider = { name: "OpenAI ChatCompletion", @@ -45,6 +46,8 @@ const frontendModelProvider: FrontendModelProvider { const message = output.choices[0]?.message; if (!message) diff --git a/src/modelProviders/openai-ChatCompletion/refinementActions.ts b/src/modelProviders/openai-ChatCompletion/refinementActions.ts new file mode 100644 index 0000000..ee3128d --- /dev/null +++ b/src/modelProviders/openai-ChatCompletion/refinementActions.ts @@ -0,0 +1,279 @@ +import { TfiThought } from "react-icons/tfi"; +import { type RefinementAction } from "../types"; +import { VscJson } from "react-icons/vsc"; + +export const refinementActions: Record = { + "Add chain of thought": { + icon: VscJson, + description: "Asking the model to plan its answer can increase accuracy.", + instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message. + + This is what a prompt looks like before adding chain of thought: + + definePrompt("openai/ChatCompletion", { + model: "gpt-4", + stream: true, + messages: [ + { + role: "system", + content: \`Evaluate sentiment.\`, + }, + { + role: "user", + content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`, + }, + ], + }); + + This is what one looks like after adding chain of thought: + + definePrompt("openai/ChatCompletion", { + model: "gpt-4", + stream: true, + messages: [ + { + role: "system", + content: \`Evaluate sentiment.\`, + }, + { + role: "user", + content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`, + }, + ], + }); + + Here's another example: + + Before: + + definePrompt("openai/ChatCompletion", { + model: "gpt-3.5-turbo", + messages: [ + { + role: "user", + content: \`Title: \${scenario.title} + Body: \${scenario.body} + + Need: \${scenario.need} + + Rate likelihood on 1-3 scale.\`, + }, + ], + temperature: 0, + functions: [ + { + name: "score_post", + parameters: { + type: "object", + properties: { + score: { + type: "number", + }, + }, + }, + }, + ], + function_call: { + name: "score_post", + }, + }); + + After: + + definePrompt("openai/ChatCompletion", { + model: "gpt-3.5-turbo", + messages: [ + { + role: "user", + content: \`Title: \${scenario.title} + Body: \${scenario.body} + + Need: \${scenario.need} + + Rate likelihood on 1-3 scale. 
Provide an explanation, but always provide a score afterward.\`, + }, + ], + temperature: 0, + functions: [ + { + name: "score_post", + parameters: { + type: "object", + properties: { + explanation: { + type: "string", + } + score: { + type: "number", + }, + }, + }, + }, + ], + function_call: { + name: "score_post", + }, + }); + + Add chain of thought to the original prompt.`, + }, + "Convert to function call": { + icon: TfiThought, + description: "Use function calls to get output from the model in a more structured way.", + instructions: `OpenAI functions are a specialized way for an LLM to return output. + + This is what a prompt looks like before adding a function: + + definePrompt("openai/ChatCompletion", { + model: "gpt-4", + stream: true, + messages: [ + { + role: "system", + content: \`Evaluate sentiment.\`, + }, + { + role: "user", + content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`, + }, + ], + }); + + This is what one looks like after adding a function: + + definePrompt("openai/ChatCompletion", { + model: "gpt-4", + stream: true, + messages: [ + { + role: "system", + content: "Evaluate sentiment.", + }, + { + role: "user", + content: scenario.user_message, + }, + ], + functions: [ + { + name: "extract_sentiment", + parameters: { + type: "object", // parameters must always be an object with a properties key + properties: { // properties key is required + sentiment: { + type: "string", + description: "one of positive/negative/neutral", + }, + }, + }, + }, + ], + function_call: { + name: "extract_sentiment", + }, + }); + + Here's another example of adding a function: + + Before: + + definePrompt("openai/ChatCompletion", { + model: "gpt-3.5-turbo", + messages: [ + { + role: "user", + content: \`Here is the title and body of a reddit post I am interested in: + + title: \${scenario.title} + body: \${scenario.body} + + On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1. + + Need: \${scenario.need} + + Answer one integer between 1 and 3.\`, + }, + ], + temperature: 0, + }); + + After: + + definePrompt("openai/ChatCompletion", { + model: "gpt-3.5-turbo", + messages: [ + { + role: "user", + content: \`Title: \${scenario.title} + Body: \${scenario.body} + + Need: \${scenario.need} + + Rate likelihood on 1-3 scale.\`, + }, + ], + temperature: 0, + functions: [ + { + name: "score_post", + parameters: { + type: "object", + properties: { + score: { + type: "number", + }, + }, + }, + }, + ], + function_call: { + name: "score_post", + }, + }); + + Another example + + Before: + + definePrompt("openai/ChatCompletion", { + model: "gpt-3.5-turbo", + stream: true, + messages: [ + { + role: "system", + content: \`Write 'Start experimenting!' in \${scenario.language}\`, + }, + ], + }); + + After: + + definePrompt("openai/ChatCompletion", { + model: "gpt-3.5-turbo", + messages: [ + { + role: "system", + content: \`Write 'Start experimenting!' 
in \${scenario.language}\`,
+      },
+    ],
+    functions: [
+      {
+        name: "write_in_language",
+        parameters: {
+          type: "object",
+          properties: {
+            text: {
+              type: "string",
+            },
+          },
+        },
+      },
+    ],
+    function_call: {
+      name: "write_in_language",
+    },
+  });
+
+  Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
+  },
+};
diff --git a/src/modelProviders/replicate-llama2/frontend.ts b/src/modelProviders/replicate-llama2/frontend.ts
index dc3a6e0..c3169f7 100644
--- a/src/modelProviders/replicate-llama2/frontend.ts
+++ b/src/modelProviders/replicate-llama2/frontend.ts
@@ -1,5 +1,6 @@
 import { type SupportedModel, type ReplicateLlama2Output } from ".";
 import { type FrontendModelProvider } from "../types";
+import { refinementActions } from "./refinementActions";
 
 const frontendModelProvider: FrontendModelProvider<SupportedModel, ReplicateLlama2Output> = {
   name: "Replicate Llama2",
@@ -31,6 +32,8 @@ const frontendModelProvider: FrontendModelProvider<SupportedModel, ReplicateLlama2Output> = {
     },
   },
 
+  refinementActions,
+
   normalizeOutput: (output) => {
     return {
       type: "text",
diff --git a/src/modelProviders/replicate-llama2/index.ts b/src/modelProviders/replicate-llama2/index.ts
index 4e7c891..1907a65 100644
--- a/src/modelProviders/replicate-llama2/index.ts
+++ b/src/modelProviders/replicate-llama2/index.ts
@@ -38,26 +38,42 @@ const modelProvider: ReplicateLlama2Provider = {
         type: "string",
         enum: supportedModels as unknown as string[],
       },
+      system_prompt: {
+        type: "string",
+        description:
+          "System prompt to send to Llama v2. This is prepended to the prompt and helps guide system behavior.",
+      },
       prompt: {
         type: "string",
+        description: "Prompt to send to Llama v2.",
       },
       stream: {
         type: "boolean",
+        description: "Whether to stream output from Llama v2.",
       },
-      max_length: {
+      max_new_tokens: {
         type: "number",
+        description:
+          "Maximum number of tokens to generate. A word is generally 2-3 tokens (minimum: 1)",
       },
       temperature: {
         type: "number",
+        description:
+          "Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value. (minimum: 0.01; maximum: 5)",
       },
       top_p: {
         type: "number",
+        description:
+          "When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens (minimum: 0.01; maximum: 1)",
       },
       repetition_penalty: {
         type: "number",
+        description:
+          "Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it. (minimum: 0.01; maximum: 5)",
       },
       debug: {
         type: "boolean",
+        description: "provide debugging output in logs",
       },
     },
     required: ["model", "prompt"],
diff --git a/src/modelProviders/replicate-llama2/refinementActions.ts b/src/modelProviders/replicate-llama2/refinementActions.ts
new file mode 100644
index 0000000..6df8ac8
--- /dev/null
+++ b/src/modelProviders/replicate-llama2/refinementActions.ts
@@ -0,0 +1,3 @@
+import { type RefinementAction } from "../types";
+
+export const refinementActions: Record<string, RefinementAction> = {};
diff --git a/src/modelProviders/types.ts b/src/modelProviders/types.ts
index 2bdb8ec..18c5545 100644
--- a/src/modelProviders/types.ts
+++ b/src/modelProviders/types.ts
@@ -1,4 +1,5 @@
 import { type JSONSchema4 } from "json-schema";
+import { type IconType } from "react-icons";
 import { type JsonValue } from "type-fest";
 import { z } from "zod";
 
@@ -23,9 +24,12 @@ export const ZodModel = z.object({
 
 export type Model = z.infer<typeof ZodModel>;
 
+export type RefinementAction = { icon?: IconType; description: string; instructions: string };
+
 export type FrontendModelProvider<SupportedModels extends string, OutputSchema> = {
   name: string;
   models: Record<SupportedModels, Model>;
+  refinementActions?: Record<string, RefinementAction>;
 
   normalizeOutput: (output: OutputSchema) => NormalizedOutput;
 };
diff --git a/src/server/utils/deriveNewContructFn.ts b/src/server/utils/deriveNewContructFn.ts
index 2149f00..aa164b6 100644
--- a/src/server/utils/deriveNewContructFn.ts
+++ b/src/server/utils/deriveNewContructFn.ts
@@ -74,6 +74,11 @@ const requestUpdatedPromptFunction = async (
         2,
       )}`,
     });
+  } else {
+    messages.push({
+      role: "user",
+      content: `The provider is the same as the old provider: ${originalModel.provider}`,
+    });
   }
 }
 if (instructions) {
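Not part of the diff above: a minimal sketch of what a populated provider-level refinementActions module could look like under the new RefinementAction type from src/modelProviders/types.ts. The replicate-llama2 map is left empty in this change, so the entry below is purely hypothetical and only illustrates the expected shape (label key, optional icon, description, instructions).

// Hypothetical example — not introduced by this diff.
// Shape follows RefinementAction: { icon?: IconType; description: string; instructions: string }
import { TfiThought } from "react-icons/tfi";
import { type RefinementAction } from "../types";

export const refinementActions: Record<string, RefinementAction> = {
  "Add chain of thought": {
    // icon is optional; RefineAction presumably falls back to the BsStars icon it now imports
    icon: TfiThought,
    description: "Asking the model to plan its answer can increase accuracy.",
    instructions: "Ask the model to explain its reasoning before giving a final answer.",
  },
};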