Change up refinement UI (#66)

* Remove unused ScenarioVariantCell fields

* Refine deriveNewConstructFn

* Fix prettier

* Remove migration script

* Add refine modal

* Fix prettier

* Fix diff checker overflow

* Decrease diff height

* Add more context to prompt refining

* Auto-expand prompt when refining
Author: arcticfly
Date: 2023-07-19 17:19:45 -07:00
Committed by: GitHub
Parent: 7d2166b305
Commit: e6e2c706c2
3 changed files with 116 additions and 18 deletions


@@ -12,7 +12,6 @@ const CompareFunctions = ({
originalFunction: string;
newFunction?: string;
}) => {
- console.log("newFunction", newFunction);
const highlightSyntax = (str: string) => {
let highlighted;
try {
@@ -25,17 +24,17 @@ const CompareFunctions = ({
};
return (
<HStack w="full" spacing={5}>
<VStack w="full" spacing={4} maxH="65vh" fontSize={12} lineHeight={1} overflowY="auto">
<VStack w="full" spacing={4} maxH="50vh" fontSize={12} lineHeight={1} overflowY="auto">
<DiffViewer
oldValue={originalFunction}
newValue={newFunction || originalFunction}
splitView={true}
hideLineNumbers={true}
leftTitle="Original"
rightTitle={newFunction ? "Modified" : "Unmodified"}
disableWordDiff={true}
compareMethod={DiffMethod.CHARS}
renderContent={highlightSyntax}
showDiffOnly={false}
/>
</VStack>
</HStack>
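
Note on the truncated helper above: the hunk cuts off inside highlightSyntax. With react-diff-viewer, the renderContent prop is typically backed by Prism; a minimal sketch of that shape (the prismjs import and the javascript grammar are assumptions, not taken from this diff):

import Prism from "prismjs";

// Sketch only: highlight one diff line, falling back to plain text when
// Prism cannot tokenize the fragment. The grammar choice is an assumption.
const highlightSyntax = (str: string) => {
  let highlighted;
  try {
    highlighted = Prism.highlight(str, Prism.languages.javascript, "javascript");
  } catch {
    highlighted = str;
  }
  return <span dangerouslySetInnerHTML={{ __html: highlighted }} />;
};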


@@ -11,12 +11,16 @@ import {
Text,
Spinner,
HStack,
InputGroup,
Input,
InputRightElement,
Icon,
} from "@chakra-ui/react";
import { IoMdSend } from "react-icons/io";
import { api } from "~/utils/api";
import { useHandledAsyncCallback } from "~/utils/hooks";
import { type PromptVariant } from "@prisma/client";
import { useState } from "react";
import AutoResizeTextArea from "../AutoResizeTextArea";
import CompareFunctions from "./CompareFunctions";
export const RefinePromptModal = ({
@@ -56,12 +60,20 @@ export const RefinePromptModal = ({
<Modal isOpen onClose={onClose} size={{ base: "xl", sm: "2xl", md: "7xl" }}>
<ModalOverlay />
<ModalContent w={1200}>
- <ModalHeader>Refine Your Prompt</ModalHeader>
+ <ModalHeader>Refine with GPT-4</ModalHeader>
<ModalCloseButton />
<ModalBody maxW="unset">
- <VStack spacing={8}>
- <HStack w="full">
- <AutoResizeTextArea
+ <VStack spacing={16} pt={8}>
+ <InputGroup
+ size="md"
+ w="full"
+ maxW="600"
+ boxShadow="0 0 40px 4px rgba(0, 0, 0, 0.1);"
+ borderRadius={8}
+ alignItems="center"
+ colorScheme="orange"
+ >
+ <Input
value={instructions}
onChange={(e) => setInstructions(e.target.value)}
onKeyDown={(e) => {
@@ -71,12 +83,41 @@ export const RefinePromptModal = ({
getRefinedPromptFn();
}
}}
placeholder="Use chain of thought"
placeholder="Send instructions"
py={7}
px={4}
colorScheme="orange"
borderColor="gray.300"
borderWidth={1}
_hover={{
borderColor: "gray.300",
}}
_focus={{
borderColor: "gray.300",
}}
/>
- <Button onClick={getRefinedPromptFn}>
- {refiningInProgress ? <Spinner boxSize={4} /> : <Text>Submit</Text>}
- </Button>
- </HStack>
+ <InputRightElement width="8" height="full">
+ <Button
+ h="8"
+ w="8"
+ minW="unset"
+ size="sm"
+ onClick={getRefinedPromptFn}
+ disabled={!instructions}
+ variant={instructions ? "solid" : "ghost"}
+ mr={4}
+ borderRadius="8"
+ bgColor={instructions ? "orange.400" : "transparent"}
+ colorScheme="orange"
+ >
+ {refiningInProgress ? (
+ <Spinner boxSize={4} />
+ ) : (
+ <Icon as={IoMdSend} color={instructions ? "white" : "gray.500"} boxSize={5} />
+ )}
+ </Button>
+ </InputRightElement>
+ </InputGroup>
<CompareFunctions
originalFunction={variant.constructFn}
newFunction={refinedPromptFn}
@@ -85,13 +126,14 @@ export const RefinePromptModal = ({
</ModalBody>
<ModalFooter>
- <HStack spacing={4}>
- <Button onClick={onClose}>Cancel</Button>
+ <HStack spacing={4} pt={8}>
<Button
colorScheme="blue"
onClick={replaceVariant}
minW={24}
- disabled={!refinedPromptFn}
+ disabled={true}
+ _disabled={{
+ bgColor: "blue.500",
+ }}
>
{replacementInProgress ? <Spinner boxSize={4} /> : <Text>Accept</Text>}
</Button>
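
Note on the onKeyDown handler above: the guard around getRefinedPromptFn() sits on unchanged lines above the hunk and is elided. A typical shape for this submit-on-Enter pattern, reconstructed as an assumption rather than read from the file:

onKeyDown={(e) => {
  // Assumed guard: plain Enter submits; Shift+Enter still inserts a newline.
  if (e.key === "Enter" && !e.shiftKey) {
    e.preventDefault();
    getRefinedPromptFn();
  }
}}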


@@ -40,6 +40,59 @@ const requestUpdatedPromptFunction = async (
) => {
const originalModel = originalVariant.model as SupportedModel;
let newContructionFn = "";
+ const usefulTips = `Chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers.
+ This is what a prompt looks like without using function calls:
+ prompt = {
+ model: "gpt-4",
+ stream: true,
+ messages: [
+ {
+ role: "system",
+ content: \`Evaluate sentiment.\`,
+ },
+ {
+ role: "user",
+ content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
+ },
+ ],
+ };
+ This is what one looks like using function calls:
+ prompt = {
+ model: "gpt-3.5-turbo-0613",
+ stream: true,
+ messages: [
+ {
+ role: "system",
+ content: "Evaluate sentiment.",
+ },
+ {
+ role: "user",
+ content: scenario.user_message,
+ },
+ ],
+ functions: [
+ {
+ name: "extract_sentiment",
+ parameters: {
+ type: "object",
+ properties: {
+ sentiment: {
+ type: "string",
+ description: "one of positive/negative/neutral",
+ },
+ },
+ },
+ },
+ ],
+ function_call: {
+ name: "extract_sentiment",
+ },
+ };
+ `;
for (let i = 0; i < NUM_RETRIES; i++) {
try {
const messages: CompletionCreateParams.CreateChatCompletionRequestNonStreaming.Message[] = [
@@ -49,7 +102,7 @@ const requestUpdatedPromptFunction = async (
getApiShapeForModel(originalModel),
null,
2,
- )}`,
+ )}\n\nDo not add any assistant messages.`,
},
];
if (newModel) {
@@ -59,6 +112,10 @@ const requestUpdatedPromptFunction = async (
});
}
if (instructions) {
+ messages.push({
+ role: "system",
+ content: `Here is some useful information about prompt engineering: ${usefulTips}`,
+ });
messages.push({
role: "user",
content: `Follow these instructions: ${instructions}`,
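
Note on the retry loop above: the completion call that consumes this message stack falls outside the visible hunks. A minimal sketch of what the NUM_RETRIES loop implies, assuming the v4 openai client; the client wiring and response handling here are hypothetical:

import OpenAI from "openai";

const openai = new OpenAI(); // hypothetical wiring; not shown in this diff

// Sketch: request the rewritten construction function, retrying on transient
// errors and keeping the first non-empty response.
for (let i = 0; i < NUM_RETRIES; i++) {
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4",
      messages,
    });
    newContructionFn = completion.choices[0]?.message?.content ?? "";
    if (newContructionFn) break;
  } catch (err) {
    if (i === NUM_RETRIES - 1) throw err; // surface the last failure
  }
}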