Allow user to create a version of their current prompt with a new model (#58)
* Add dropdown header for model switching
* Allow variant duplication
* Fix prettier
* Use env variable to restrict prisma logs
* Fix env.mjs
* Remove unnecessary scroll bar from function call output
* Properly record when 404 error occurs in queryLLM task
* Add SelectedModelInfo in SelectModelModal
* Add react-select
* Calculate new prompt after switching model
* Send newly selected model with creation request
* Get new prompt construction function back from GPT-4
* Fix prettier
* Fix prettier
107 src/server/utils/deriveNewContructFn.ts Normal file
@@ -0,0 +1,107 @@
import { type PromptVariant } from "@prisma/client";
import { type SupportedModel } from "../types";
import ivm from "isolated-vm";
import dedent from "dedent";
import { openai } from "./openai";
import { getApiShapeForModel } from "./getTypesForModel";
import { isObject } from "lodash-es";

const isolate = new ivm.Isolate({ memoryLimit: 128 });

export async function deriveNewConstructFn(
  originalVariant: PromptVariant | null,
  newModel?: SupportedModel,
) {
  if (originalVariant && !newModel) {
    return originalVariant.constructFn;
  }
  if (originalVariant && newModel) {
    return await getPromptFunctionForNewModel(originalVariant, newModel);
  }
  return dedent`
    prompt = {
      model: "gpt-3.5-turbo",
      messages: [
        {
          role: "system",
          content: "Return 'Hello, world!'",
        }
      ]
    }`;
}

const NUM_RETRIES = 5;

const getPromptFunctionForNewModel = async (
  originalVariant: PromptVariant,
  newModel: SupportedModel,
) => {
  const originalModel = originalVariant.model as SupportedModel;
  let newContructionFn = "";
  for (let i = 0; i < NUM_RETRIES; i++) {
    try {
      // TODO: Add api shape info to prompt
      const completion = await openai.chat.completions.create({
        model: "gpt-4",
        messages: [
          {
            role: "system",
            content: `Your job is to translate prompt constructor functions from ${originalModel} to ${newModel}. Here is the api shape for the original model:\n---\n${JSON.stringify(
              getApiShapeForModel(originalModel),
              null,
              2,
            )}\n\nThe prompt variable has already been declared.`,
          },
          {
            role: "user",
            content: `Return the prompt constructor function for ${newModel} given the following prompt constructor function for ${originalModel}:\n---\n${originalVariant.constructFn}`,
          },
        ],
        functions: [
          {
            name: "translate_prompt_constructor_function",
            parameters: {
              type: "object",
              properties: {
                new_prompt_function: {
                  type: "string",
                  description: "The new prompt function, runnable in typescript",
                },
              },
            },
          },
        ],
        function_call: {
          name: "translate_prompt_constructor_function",
        },
      });
      const argString = completion.choices[0]?.message?.function_call?.arguments || "{}";

      const code = `
        global.contructPromptFunctionArgs = ${argString};
      `;

      const context = await isolate.createContext();

      const jail = context.global;
      await jail.set("global", jail.derefInto());

      const script = await isolate.compileScript(code);

      await script.run(context);
      const contructPromptFunctionArgs = (await context.global.get(
        "contructPromptFunctionArgs",
      )) as ivm.Reference;

      const args = await contructPromptFunctionArgs.copy(); // Get the actual value from the isolate

      if (args && isObject(args) && "new_prompt_function" in args) {
        newContructionFn = args.new_prompt_function as string;
        break;
      }
    } catch (e) {
      console.error(e);
    }
  }

  return newContructionFn;
};
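The three branches of deriveNewConstructFn map onto the UI flows this commit adds: duplicating a variant as-is, duplicating it onto a new model, and creating a fresh variant from scratch. A minimal sketch of how a caller might exercise each branch; the variant object and its field values here are hypothetical, not part of this diff:

import { type PromptVariant } from "@prisma/client";
import { type SupportedModel } from "../types";
import { deriveNewConstructFn } from "./deriveNewContructFn";

// Hypothetical variant shaped like a Prisma PromptVariant row (fields abbreviated)
const variant = {
  model: "gpt-3.5-turbo",
  constructFn: `prompt = { model: "gpt-3.5-turbo", messages: [] }`,
} as PromptVariant;

// Duplication without a model change returns the original constructFn untouched
const copied = await deriveNewConstructFn(variant);

// Switching models routes through the GPT-4 translation path
const translated = await deriveNewConstructFn(variant, "gpt-4" as SupportedModel);

// No original variant: the dedented "Hello, world!" default is returned
const fresh = await deriveNewConstructFn(null);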
@@ -21,6 +21,13 @@ export type CompletionResponse = {
export async function getCompletion(
  payload: CompletionCreateParams,
  channel?: string,
): Promise<CompletionResponse> {
  return getOpenAIChatCompletion(payload, channel);
}

export async function getOpenAIChatCompletion(
  payload: CompletionCreateParams,
  channel?: string,
): Promise<CompletionResponse> {
  // If functions are enabled, disable streaming so that we get the full response with token counts
  if (payload.functions?.length) payload.stream = false;
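getCompletion currently just forwards to the OpenAI-specific implementation; the new guard matters when a payload includes functions, since a streamed response would not carry the token counts the caller needs. A rough usage sketch, with hypothetical field values and channel name not taken from this diff:

// Hypothetical call; exact CompletionCreateParams fields depend on the openai package version
const response = await getCompletion(
  {
    model: "gpt-4",
    messages: [{ role: "user", content: "Hello" }],
    functions: [{ name: "noop", parameters: { type: "object", properties: {} } }],
    stream: true, // getOpenAIChatCompletion will flip this to false because functions are present
  },
  "example-channel", // hypothetical channel id
);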
7 src/server/utils/getTypesForModel.ts Normal file
@@ -0,0 +1,7 @@
import { OpenAIChatModel, type SupportedModel } from "../types";
import openAIChatApiShape from "~/codegen/openai.types.ts.txt";

export const getApiShapeForModel = (model: SupportedModel) => {
  if (model in OpenAIChatModel) return openAIChatApiShape;
  return "";
};
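getApiShapeForModel keys off membership in the OpenAIChatModel enum, so any model outside that enum currently falls through to an empty string; the .ts.txt import presumably resolves to a string via a raw-text loader configured elsewhere in the repo. A quick illustration, with a hypothetical non-OpenAI model name for the fall-through case:

import { getApiShapeForModel } from "./getTypesForModel";
import { type SupportedModel } from "../types";

// An OpenAI chat model resolves to the bundled openai.types.ts.txt contents
const shape = getApiShapeForModel("gpt-4" as SupportedModel);

// A hypothetical non-OpenAI model falls through to the empty string
const empty = getApiShapeForModel("some-other-model" as SupportedModel);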