Compare commits

..

7 Commits

Author SHA1 Message Date
David Corbitt 01343efb6a Properly use isDisabled 2023-07-20 22:54:38 -07:00
David Corbitt c7aaaea426 Update instructions 2023-07-20 22:42:51 -07:00
David Corbitt 332e7afb0c Pass variant to SelectModelModal 2023-07-20 22:39:32 -07:00
David Corbitt fe08e29f47 Show prompt comparison in SelectModelModal 2023-07-20 22:39:14 -07:00
David Corbitt 89ce730e52 Accept newModel in getModifiedPromptFn 2023-07-20 22:38:42 -07:00
David Corbitt ad87c1b2eb Change RefinePromptModal styles 2023-07-20 22:38:09 -07:00
David Corbitt 58ddc72cbb Make CompareFunctions more configurable 2023-07-20 22:36:21 -07:00
223 changed files with 2152 additions and 6090 deletions

View File

@@ -17,9 +17,6 @@ DATABASE_URL="postgresql://postgres:postgres@localhost:5432/openpipe?schema=publ
 # https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key
 OPENAI_API_KEY=""

-# Replicate API token. Create a token here: https://replicate.com/account/api-tokens
-REPLICATE_API_TOKEN=""

 NEXT_PUBLIC_SOCKET_URL="http://localhost:3318"

 # Next Auth

View File

@@ -1,11 +1,9 @@
 # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

-# App files

 # dependencies
 /node_modules
 /.pnp
-/.pnp.js
+.pnp.js

 # testing
 /coverage
@@ -17,28 +15,28 @@
 # next.js
 /.next/
 /out/
-/next-env.d.ts
+next-env.d.ts

 # production
 /build

 # misc
-/.DS_Store
-/*.pem
+.DS_Store
+*.pem

 # debug
-/npm-debug.log*
-/yarn-debug.log*
-/yarn-error.log*
-/.pnpm-debug.log*
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*

 # local env files
 # do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables
-/.env
-/.env*.local
+.env
+.env*.local

 # vercel
-/.vercel
+.vercel

 # typescript
-/*.tsbuildinfo
+*.tsbuildinfo

.prettierignore Normal file
View File

@@ -0,0 +1,2 @@
src/codegen/openai.schema.json
pnpm-lock.yaml

View File

@@ -1,3 +1,6 @@
 {
-  "eslint.format.enable": true
+  "eslint.format.enable": true,
+  "editor.codeActionsOnSave": {
+    "source.fixAll.eslint": true
+  }
 }

View File

@@ -12,9 +12,7 @@ declare module "nextjs-routes" {
 export type Route =
   | StaticRoute<"/account/signin">
-  | DynamicRoute<"/api/[...trpc]", { "trpc": string[] }>
   | DynamicRoute<"/api/auth/[...nextauth]", { "nextauth": string[] }>
-  | StaticRoute<"/api/openapi">
   | DynamicRoute<"/api/trpc/[trpc]", { "trpc": string }>
   | DynamicRoute<"/experiments/[id]", { "id": string }>
   | StaticRoute<"/experiments">

View File

@@ -43,9 +43,7 @@ Natively supports [OpenAI function calls](https://openai.com/blog/function-calli
 ## Supported Models

-- All models available through the OpenAI [chat completion API](https://platform.openai.com/docs/guides/gpt/chat-completions-api)
-- Llama2 [7b chat](https://replicate.com/a16z-infra/llama7b-v2-chat), [13b chat](https://replicate.com/a16z-infra/llama13b-v2-chat), [70b chat](https://replicate.com/replicate/llama70b-v2-chat).
-- Anthropic's [Claude 1 Instant](https://www.anthropic.com/index/introducing-claude) and [Claude 2](https://www.anthropic.com/index/claude-2)
+OpenPipe currently supports GPT-3.5 and GPT-4. Wider model support is planned.

 ## Running Locally

View File

@@ -1,2 +0,0 @@
*.schema.json
pnpm-lock.yaml

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +0,0 @@
{
"$schema": "./node_modules/@openapitools/openapi-generator-cli/config.schema.json",
"spaces": 2,
"generator-cli": {
"version": "6.6.0"
}
}

View File

@@ -1,8 +0,0 @@
/*
Warnings:
- You are about to drop the column `streamingChannel` on the `ScenarioVariantCell` table. All the data in the column will be lost.
*/
-- AlterTable
ALTER TABLE "ScenarioVariantCell" DROP COLUMN "streamingChannel";

View File

@@ -1,52 +0,0 @@
-- DropForeignKey
ALTER TABLE "ModelOutput" DROP CONSTRAINT "ModelOutput_scenarioVariantCellId_fkey";
-- DropForeignKey
ALTER TABLE "OutputEvaluation" DROP CONSTRAINT "OutputEvaluation_modelOutputId_fkey";
-- DropIndex
DROP INDEX "OutputEvaluation_modelOutputId_evaluationId_key";
-- AlterTable
ALTER TABLE "OutputEvaluation" RENAME COLUMN "modelOutputId" TO "modelResponseId";
-- AlterTable
ALTER TABLE "ScenarioVariantCell" DROP COLUMN "retryTime",
DROP COLUMN "statusCode",
ADD COLUMN "jobQueuedAt" TIMESTAMP(3),
ADD COLUMN "jobStartedAt" TIMESTAMP(3);
ALTER TABLE "ModelOutput" RENAME TO "ModelResponse";
ALTER TABLE "ModelResponse"
ADD COLUMN "requestedAt" TIMESTAMP(3),
ADD COLUMN "receivedAt" TIMESTAMP(3),
ADD COLUMN "statusCode" INTEGER,
ADD COLUMN "errorMessage" TEXT,
ADD COLUMN "retryTime" TIMESTAMP(3),
ADD COLUMN "outdated" BOOLEAN NOT NULL DEFAULT false;
-- 3. Remove the unnecessary column
ALTER TABLE "ModelResponse"
DROP COLUMN "timeToComplete";
-- AlterTable
ALTER TABLE "ModelResponse" RENAME CONSTRAINT "ModelOutput_pkey" TO "ModelResponse_pkey";
ALTER TABLE "ModelResponse" ALTER COLUMN "output" DROP NOT NULL;
-- DropIndex
DROP INDEX "ModelOutput_scenarioVariantCellId_key";
-- AddForeignKey
ALTER TABLE "ModelResponse" ADD CONSTRAINT "ModelResponse_scenarioVariantCellId_fkey" FOREIGN KEY ("scenarioVariantCellId") REFERENCES "ScenarioVariantCell"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- RenameIndex
ALTER INDEX "ModelOutput_inputHash_idx" RENAME TO "ModelResponse_inputHash_idx";
-- CreateIndex
CREATE UNIQUE INDEX "OutputEvaluation_modelResponseId_evaluationId_key" ON "OutputEvaluation"("modelResponseId", "evaluationId");
-- AddForeignKey
ALTER TABLE "OutputEvaluation" ADD CONSTRAINT "OutputEvaluation_modelResponseId_fkey" FOREIGN KEY ("modelResponseId") REFERENCES "ModelResponse"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -1,64 +0,0 @@
-- AlterTable
ALTER TABLE "Experiment" ADD COLUMN "dataFlowId" UUID;
-- AlterTable
ALTER TABLE "ModelResponse" ADD COLUMN "loggedCallId" UUID,
ALTER COLUMN "scenarioVariantCellId" DROP NOT NULL;
-- CreateTable
CREATE TABLE "LoggedCall" (
"id" UUID NOT NULL,
"promptFunctionHash" TEXT NOT NULL,
"promptFunction" TEXT NOT NULL,
"prompt" JSONB NOT NULL,
"responsePayload" JSONB,
"scenarioVariables" JSONB NOT NULL,
"model" TEXT NOT NULL,
"modelProvider" TEXT NOT NULL,
"dataFlowId" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "LoggedCall_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "DataFlow" (
"id" UUID NOT NULL,
"label" TEXT NOT NULL,
"organizationId" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "DataFlow_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ApiKey" (
"id" UUID NOT NULL,
"name" TEXT NOT NULL,
"key" TEXT NOT NULL,
"organizationId" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "ApiKey_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "ApiKey_key_key" ON "ApiKey"("key");
-- AddForeignKey
ALTER TABLE "Experiment" ADD CONSTRAINT "Experiment_dataFlowId_fkey" FOREIGN KEY ("dataFlowId") REFERENCES "DataFlow"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ModelResponse" ADD CONSTRAINT "ModelResponse_loggedCallId_fkey" FOREIGN KEY ("loggedCallId") REFERENCES "LoggedCall"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "LoggedCall" ADD CONSTRAINT "LoggedCall_dataFlowId_fkey" FOREIGN KEY ("dataFlowId") REFERENCES "DataFlow"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "DataFlow" ADD CONSTRAINT "DataFlow_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ApiKey" ADD CONSTRAINT "ApiKey_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -1,42 +0,0 @@
import "dotenv/config";
import { openApiDocument } from "~/pages/api/openapi.json";
import fs from "fs";
import path from "path";
import { execSync } from "child_process";
import { generatePromptTypes } from "~/modelProviders/generatePromptTypes";
console.log("Exporting public OpenAPI schema to client-libs/schema.json");
const executionDir = import.meta.url.replace("file://", "");
const schemaPath = path.join(
path.dirname(executionDir),
"../../../client-libs/schema.json"
);
console.log("Exporting schema");
fs.writeFileSync(schemaPath, JSON.stringify(openApiDocument, null, 2), "utf-8");
console.log("Generating Typescript client");
const tsClientPath = path.join(
path.dirname(executionDir),
"../../../client-libs/js/codegen"
);
fs.rmSync(tsClientPath, { recursive: true, force: true });
execSync(
`pnpm dlx @openapitools/openapi-generator-cli generate -i "${schemaPath}" -g typescript-axios -o "${tsClientPath}"`,
{
stdio: "inherit",
}
);
const promptTypes = await generatePromptTypes();
const promptTypesPath = path.join(tsClientPath, "promptTypes.ts");
console.log(`Writing promptTypes to ${promptTypesPath}`);
fs.writeFileSync(promptTypesPath, promptTypes, "utf-8");
console.log("Done!");

View File

@@ -1,36 +0,0 @@
import { Text, VStack } from "@chakra-ui/react";
import { type LegacyRef } from "react";
import Select from "react-select";
import { useElementDimensions } from "~/utils/hooks";
import { flatMap } from "lodash-es";
import frontendModelProviders from "~/modelProviders/frontendModelProviders";
import { type ProviderModel } from "~/modelProviders/types";
import { modelLabel } from "~/utils/utils";
const modelOptions = flatMap(Object.entries(frontendModelProviders), ([providerId, provider]) =>
Object.entries(provider.models).map(([modelId]) => ({
provider: providerId,
model: modelId,
})),
) as ProviderModel[];
export const ModelSearch = (props: {
selectedModel: ProviderModel;
setSelectedModel: (model: ProviderModel) => void;
}) => {
const [containerRef, containerDimensions] = useElementDimensions();
return (
<VStack ref={containerRef as LegacyRef<HTMLDivElement>} w="full">
<Text>Browse Models</Text>
<Select<ProviderModel>
styles={{ control: (provided) => ({ ...provided, width: containerDimensions?.width }) }}
getOptionLabel={(data) => modelLabel(data.provider, data.model)}
getOptionValue={(data) => modelLabel(data.provider, data.model)}
options={modelOptions}
onChange={(option) => option && props.setSelectedModel(option)}
/>
</VStack>
);
};

View File

@@ -1,109 +0,0 @@
import {
GridItem,
HStack,
Link,
SimpleGrid,
Text,
VStack,
type StackProps,
} from "@chakra-ui/react";
import { type lookupModel } from "~/utils/utils";
export const ModelStatsCard = ({
label,
model,
}: {
label: string;
model: ReturnType<typeof lookupModel>;
}) => {
if (!model) return null;
return (
<VStack w="full" align="start">
<Text fontWeight="bold" fontSize="sm" textTransform="uppercase">
{label}
</Text>
<VStack w="full" spacing={6} bgColor="gray.100" p={4} borderRadius={4}>
<HStack w="full" align="flex-start">
<Text flex={1} fontSize="lg">
<Text as="span" color="gray.600">
{model.provider} /{" "}
</Text>
<Text as="span" fontWeight="bold" color="gray.900">
{model.name}
</Text>
</Text>
<Link
href={model.learnMoreUrl}
isExternal
color="blue.500"
fontWeight="bold"
fontSize="sm"
ml={2}
>
Learn More
</Link>
</HStack>
<SimpleGrid
w="full"
justifyContent="space-between"
alignItems="flex-start"
fontSize="sm"
columns={{ base: 2, md: 4 }}
>
<SelectedModelLabeledInfo label="Context Window" info={model.contextWindow} />
{model.promptTokenPrice && (
<SelectedModelLabeledInfo
label="Input"
info={
<Text>
${(model.promptTokenPrice * 1000).toFixed(3)}
<Text color="gray.500"> / 1K tokens</Text>
</Text>
}
/>
)}
{model.completionTokenPrice && (
<SelectedModelLabeledInfo
label="Output"
info={
<Text>
${(model.completionTokenPrice * 1000).toFixed(3)}
<Text color="gray.500"> / 1K tokens</Text>
</Text>
}
/>
)}
{model.pricePerSecond && (
<SelectedModelLabeledInfo
label="Price"
info={
<Text>
${model.pricePerSecond.toFixed(3)}
<Text color="gray.500"> / second</Text>
</Text>
}
/>
)}
<SelectedModelLabeledInfo label="Speed" info={<Text>{model.speed}</Text>} />
</SimpleGrid>
</VStack>
</VStack>
);
};
const SelectedModelLabeledInfo = ({
label,
info,
...props
}: {
label: string;
info: string | number | React.ReactElement;
} & StackProps) => (
<GridItem>
<VStack alignItems="flex-start" {...props}>
<Text fontWeight="bold">{label}</Text>
<Text>{info}</Text>
</VStack>
</GridItem>
);

View File

@@ -1,69 +0,0 @@
import {
Button,
Icon,
AlertDialog,
AlertDialogBody,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogContent,
AlertDialogOverlay,
useDisclosure,
Text,
} from "@chakra-ui/react";
import { useRouter } from "next/router";
import { useRef } from "react";
import { BsTrash } from "react-icons/bs";
import { api } from "~/utils/api";
import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
export const DeleteButton = () => {
const experiment = useExperiment();
const mutation = api.experiments.delete.useMutation();
const utils = api.useContext();
const router = useRouter();
const { isOpen, onOpen, onClose } = useDisclosure();
const cancelRef = useRef<HTMLButtonElement>(null);
const [onDeleteConfirm] = useHandledAsyncCallback(async () => {
if (!experiment.data?.id) return;
await mutation.mutateAsync({ id: experiment.data.id });
await utils.experiments.list.invalidate();
await router.push({ pathname: "/experiments" });
onClose();
}, [mutation, experiment.data?.id, router]);
return (
<>
<Button size="sm" variant="ghost" colorScheme="red" fontWeight="normal" onClick={onOpen}>
<Icon as={BsTrash} boxSize={4} />
<Text ml={2}>Delete Experiment</Text>
</Button>
<AlertDialog isOpen={isOpen} leastDestructiveRef={cancelRef} onClose={onClose}>
<AlertDialogOverlay>
<AlertDialogContent>
<AlertDialogHeader fontSize="lg" fontWeight="bold">
Delete Experiment
</AlertDialogHeader>
<AlertDialogBody>
If you delete this experiment, all the associated prompts and scenarios will be deleted
as well. Are you sure?
</AlertDialogBody>
<AlertDialogFooter>
<Button ref={cancelRef} onClick={onClose}>
Cancel
</Button>
<Button colorScheme="red" onClick={onDeleteConfirm} ml={3}>
Delete
</Button>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialogOverlay>
</AlertDialog>
</>
);
};

View File

@@ -1,57 +0,0 @@
import { Box, Flex, Icon, Spinner } from "@chakra-ui/react";
import { BsPlus } from "react-icons/bs";
import { Text } from "@chakra-ui/react";
import { api } from "~/utils/api";
import {
useExperiment,
useExperimentAccess,
useHandledAsyncCallback,
useVisibleScenarioIds,
} from "~/utils/hooks";
import { cellPadding } from "../constants";
import { ActionButton } from "./ScenariosHeader";
export default function AddVariantButton() {
const experiment = useExperiment();
const mutation = api.promptVariants.create.useMutation();
const utils = api.useContext();
const visibleScenarios = useVisibleScenarioIds();
const [onClick, loading] = useHandledAsyncCallback(async () => {
if (!experiment.data) return;
await mutation.mutateAsync({
experimentId: experiment.data.id,
streamScenarios: visibleScenarios,
});
await utils.promptVariants.list.invalidate();
}, [mutation]);
const { canModify } = useExperimentAccess();
if (!canModify) return <Box w={cellPadding.x} />;
return (
<Flex w="100%" justifyContent="flex-end">
<ActionButton
onClick={onClick}
py={5}
leftIcon={<Icon as={loading ? Spinner : BsPlus} boxSize={6} mr={loading ? 1 : 0} />}
>
<Text display={{ base: "none", md: "flex" }}>Add Variant</Text>
</ActionButton>
{/* <Button
alignItems="center"
justifyContent="center"
fontWeight="normal"
bgColor="transparent"
_hover={{ bgColor: "gray.100" }}
px={cellPadding.x}
onClick={onClick}
height="unset"
minH={headerMinHeight}
>
<Icon as={loading ? Spinner : BsPlus} boxSize={6} mr={loading ? 1 : 0} />
<Text display={{ base: "none", md: "flex" }}>Add Variant</Text>
</Button> */}
</Flex>
);
}

View File

@@ -1,19 +0,0 @@
import { type StackProps, VStack } from "@chakra-ui/react";
import { CellOptions } from "./CellOptions";
export const CellContent = ({
hardRefetch,
hardRefetching,
children,
...props
}: {
hardRefetch: () => void;
hardRefetching: boolean;
} & StackProps) => (
<VStack w="full" alignItems="flex-start" {...props}>
<CellOptions refetchingOutput={hardRefetching} refetchOutput={hardRefetch} />
<VStack w="full" alignItems="flex-start" maxH={500} overflowY="auto">
{children}
</VStack>
</VStack>
);

View File

@@ -1,204 +0,0 @@
import { api } from "~/utils/api";
import { type PromptVariant, type Scenario } from "../types";
import { Text, VStack } from "@chakra-ui/react";
import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
import SyntaxHighlighter from "react-syntax-highlighter";
import { docco } from "react-syntax-highlighter/dist/cjs/styles/hljs";
import stringify from "json-stringify-pretty-compact";
import { type ReactElement, useState, useEffect, Fragment } from "react";
import useSocket from "~/utils/useSocket";
import { OutputStats } from "./OutputStats";
import { RetryCountdown } from "./RetryCountdown";
import frontendModelProviders from "~/modelProviders/frontendModelProviders";
import { ResponseLog } from "./ResponseLog";
import { CellContent } from "./CellContent";
const WAITING_MESSAGE_INTERVAL = 20000;
export default function OutputCell({
scenario,
variant,
}: {
scenario: Scenario;
variant: PromptVariant;
}): ReactElement | null {
const utils = api.useContext();
const experiment = useExperiment();
const vars = api.templateVars.list.useQuery({
experimentId: experiment.data?.id ?? "",
}).data;
const scenarioVariables = scenario.variableValues as Record<string, string>;
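// Output can be generated when the experiment defines no template variables at all,
// or when at least one defined variable has a value in this scenario.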
const templateHasVariables =
vars?.length === 0 || vars?.some((v) => scenarioVariables[v.label] !== undefined);
let disabledReason: string | null = null;
if (!templateHasVariables) disabledReason = "Add a value to the scenario variables to see output";
const [refetchInterval, setRefetchInterval] = useState(0);
const { data: cell, isLoading: queryLoading } = api.scenarioVariantCells.get.useQuery(
{ scenarioId: scenario.id, variantId: variant.id },
{ refetchInterval },
);
const provider =
frontendModelProviders[variant.modelProvider as keyof typeof frontendModelProviders];
type OutputSchema = Parameters<typeof provider.normalizeOutput>[0];
const { mutateAsync: hardRefetchMutate } = api.scenarioVariantCells.forceRefetch.useMutation();
const [hardRefetch, hardRefetching] = useHandledAsyncCallback(async () => {
await hardRefetchMutate({ scenarioId: scenario.id, variantId: variant.id });
await utils.scenarioVariantCells.get.invalidate({
scenarioId: scenario.id,
variantId: variant.id,
});
await utils.promptVariants.stats.invalidate({
variantId: variant.id,
});
}, [hardRefetchMutate, scenario.id, variant.id]);
const fetchingOutput = queryLoading || hardRefetching;
const awaitingOutput =
!cell ||
!cell.evalsComplete ||
cell.retrievalStatus === "PENDING" ||
cell.retrievalStatus === "IN_PROGRESS" ||
hardRefetching;
useEffect(() => setRefetchInterval(awaitingOutput ? 1000 : 0), [awaitingOutput]);
// TODO: disconnect from socket if we're not streaming anymore
const streamedMessage = useSocket<OutputSchema>(cell?.id);
if (!vars) return null;
if (!cell && !fetchingOutput)
return (
<CellContent hardRefetching={hardRefetching} hardRefetch={hardRefetch}>
<Text color="gray.500">Error retrieving output</Text>
</CellContent>
);
if (cell && cell.errorMessage) {
return (
<CellContent hardRefetching={hardRefetching} hardRefetch={hardRefetch}>
<Text color="red.500">{cell.errorMessage}</Text>
</CellContent>
);
}
if (disabledReason) return <Text color="gray.500">{disabledReason}</Text>;
const mostRecentResponse = cell?.modelResponses[cell.modelResponses.length - 1];
const showLogs = !streamedMessage && !mostRecentResponse?.output;
if (showLogs)
return (
<CellContent
hardRefetching={hardRefetching}
hardRefetch={hardRefetch}
alignItems="flex-start"
fontFamily="inconsolata, monospace"
spacing={0}
>
{cell?.jobQueuedAt && <ResponseLog time={cell.jobQueuedAt} title="Job queued" />}
{cell?.jobStartedAt && <ResponseLog time={cell.jobStartedAt} title="Job started" />}
{cell?.modelResponses?.map((response) => {
let numWaitingMessages = 0;
const relativeWaitingTime = response.receivedAt
? response.receivedAt.getTime()
: Date.now();
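// Render one "Waiting for response" log line for each full WAITING_MESSAGE_INTERVAL
// (20s) that elapsed between sending the request and receiving the response.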
if (response.requestedAt) {
numWaitingMessages = Math.floor(
(relativeWaitingTime - response.requestedAt.getTime()) / WAITING_MESSAGE_INTERVAL,
);
}
return (
<Fragment key={response.id}>
{response.requestedAt && (
<ResponseLog time={response.requestedAt} title="Request sent to API" />
)}
{response.requestedAt &&
Array.from({ length: numWaitingMessages }, (_, i) => (
<ResponseLog
key={`waiting-${i}`}
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
time={new Date(response.requestedAt!.getTime() + i * WAITING_MESSAGE_INTERVAL)}
title="Waiting for response"
/>
))}
{response.receivedAt && (
<ResponseLog
time={response.receivedAt}
title="Response received from API"
message={`statusCode: ${response.statusCode ?? ""}\n ${
response.errorMessage ?? ""
}`}
/>
)}
</Fragment>
);
}) ?? null}
{mostRecentResponse?.retryTime && (
<RetryCountdown retryTime={mostRecentResponse.retryTime} />
)}
</CellContent>
);
const normalizedOutput = mostRecentResponse?.output
? provider.normalizeOutput(mostRecentResponse?.output)
: streamedMessage
? provider.normalizeOutput(streamedMessage)
: null;
if (mostRecentResponse?.output && normalizedOutput?.type === "json") {
return (
<VStack
w="100%"
h="100%"
fontSize="xs"
flexWrap="wrap"
overflowX="hidden"
justifyContent="space-between"
>
<CellContent
hardRefetching={hardRefetching}
hardRefetch={hardRefetch}
w="full"
flex={1}
spacing={0}
>
<SyntaxHighlighter
customStyle={{ overflowX: "unset", width: "100%", flex: 1 }}
language="json"
style={docco}
lineProps={{
style: { wordBreak: "break-all", whiteSpace: "pre-wrap" },
}}
wrapLines
>
{stringify(normalizedOutput.value, { maxLength: 40 })}
</SyntaxHighlighter>
</CellContent>
<OutputStats modelResponse={mostRecentResponse} scenario={scenario} />
</VStack>
);
}
const contentToDisplay = (normalizedOutput?.type === "text" && normalizedOutput.value) || "";
return (
<VStack w="100%" h="100%" justifyContent="space-between" whiteSpace="pre-wrap">
<VStack w="full" alignItems="flex-start" spacing={0}>
<CellContent hardRefetching={hardRefetching} hardRefetch={hardRefetch}>
<Text>{contentToDisplay}</Text>
</CellContent>
</VStack>
{mostRecentResponse?.output && (
<OutputStats modelResponse={mostRecentResponse} scenario={scenario} />
)}
</VStack>
);
}

View File

@@ -1,22 +0,0 @@
import { HStack, VStack, Text } from "@chakra-ui/react";
import dayjs from "dayjs";
export const ResponseLog = ({
time,
title,
message,
}: {
time: Date;
title: string;
message?: string;
}) => {
return (
<VStack spacing={0} alignItems="flex-start">
<HStack>
<Text>{dayjs(time).format("HH:mm:ss")}</Text>
<Text>{title}</Text>
</HStack>
{message && <Text pl={4}>{message}</Text>}
</VStack>
);
};

View File

@@ -1,74 +0,0 @@
import { Box, HStack, IconButton } from "@chakra-ui/react";
import {
BsChevronDoubleLeft,
BsChevronDoubleRight,
BsChevronLeft,
BsChevronRight,
} from "react-icons/bs";
import { usePage, useScenarios } from "~/utils/hooks";
const ScenarioPaginator = () => {
const [page, setPage] = usePage();
const { data } = useScenarios();
if (!data) return null;
const { scenarios, startIndex, lastPage, count } = data;
const nextPage = () => {
if (page < lastPage) {
setPage(page + 1, "replace");
}
};
const prevPage = () => {
if (page > 1) {
setPage(page - 1, "replace");
}
};
const goToLastPage = () => setPage(lastPage, "replace");
const goToFirstPage = () => setPage(1, "replace");
return (
<HStack pt={4}>
<IconButton
variant="ghost"
size="sm"
onClick={goToFirstPage}
isDisabled={page === 1}
aria-label="Go to first page"
icon={<BsChevronDoubleLeft />}
/>
<IconButton
variant="ghost"
size="sm"
onClick={prevPage}
isDisabled={page === 1}
aria-label="Previous page"
icon={<BsChevronLeft />}
/>
<Box>
{startIndex}-{startIndex + scenarios.length - 1} / {count}
</Box>
<IconButton
variant="ghost"
size="sm"
onClick={nextPage}
isDisabled={page === lastPage}
aria-label="Next page"
icon={<BsChevronRight />}
/>
<IconButton
variant="ghost"
size="sm"
onClick={goToLastPage}
isDisabled={page === lastPage}
aria-label="Go to last page"
icon={<BsChevronDoubleRight />}
/>
</HStack>
);
};
export default ScenarioPaginator;

View File

@@ -1,82 +0,0 @@
import {
Button,
type ButtonProps,
HStack,
Text,
Icon,
Menu,
MenuButton,
MenuList,
MenuItem,
IconButton,
Spinner,
} from "@chakra-ui/react";
import { cellPadding } from "../constants";
import {
useExperiment,
useExperimentAccess,
useHandledAsyncCallback,
useScenarios,
} from "~/utils/hooks";
import { BsGear, BsPencil, BsPlus, BsStars } from "react-icons/bs";
import { useAppStore } from "~/state/store";
import { api } from "~/utils/api";
export const ActionButton = (props: ButtonProps) => (
<Button size="sm" variant="ghost" color="gray.600" {...props} />
);
export const ScenariosHeader = () => {
const openDrawer = useAppStore((s) => s.openDrawer);
const { canModify } = useExperimentAccess();
const scenarios = useScenarios();
const experiment = useExperiment();
const createScenarioMutation = api.scenarios.create.useMutation();
const utils = api.useContext();
const [onAddScenario, loading] = useHandledAsyncCallback(
async (autogenerate: boolean) => {
if (!experiment.data) return;
await createScenarioMutation.mutateAsync({
experimentId: experiment.data.id,
autogenerate,
});
await utils.scenarios.list.invalidate();
},
[createScenarioMutation],
);
return (
<HStack w="100%" pb={cellPadding.y} pt={0} align="center" spacing={0}>
<Text fontSize={16} fontWeight="bold">
Scenarios ({scenarios.data?.count})
</Text>
{canModify && (
<Menu>
<MenuButton
as={IconButton}
mt={1}
variant="ghost"
aria-label="Edit Scenarios"
icon={<Icon as={loading ? Spinner : BsGear} />}
/>
<MenuList fontSize="md" zIndex="dropdown" mt={-3}>
<MenuItem
icon={<Icon as={BsPlus} boxSize={6} mx="-5px" />}
onClick={() => onAddScenario(false)}
>
Add Scenario
</MenuItem>
<MenuItem icon={<BsStars />} onClick={() => onAddScenario(true)}>
Autogenerate Scenario
</MenuItem>
<MenuItem icon={<BsPencil />} onClick={openDrawer}>
Edit Vars
</MenuItem>
</MenuList>
</Menu>
)}
</HStack>
);
};

View File

@@ -1,107 +0,0 @@
import { Grid, GridItem, type GridItemProps } from "@chakra-ui/react";
import { api } from "~/utils/api";
import AddVariantButton from "./AddVariantButton";
import ScenarioRow from "./ScenarioRow";
import VariantEditor from "./VariantEditor";
import VariantHeader from "../VariantHeader/VariantHeader";
import VariantStats from "./VariantStats";
import { ScenariosHeader } from "./ScenariosHeader";
import { borders } from "./styles";
import { useScenarios } from "~/utils/hooks";
import ScenarioPaginator from "./ScenarioPaginator";
import { Fragment } from "react";
export default function OutputsTable({ experimentId }: { experimentId: string | undefined }) {
const variants = api.promptVariants.list.useQuery(
{ experimentId: experimentId as string },
{ enabled: !!experimentId },
);
const scenarios = useScenarios();
if (!variants.data || !scenarios.data) return null;
const allCols = variants.data.length + 2;
const variantHeaderRows = 3;
const scenarioHeaderRows = 1;
const scenarioFooterRows = 1;
const visibleScenariosCount = scenarios.data.scenarios.length;
const allRows =
variantHeaderRows + scenarioHeaderRows + visibleScenariosCount + scenarioFooterRows;
return (
<Grid
pt={4}
pb={24}
pl={8}
display="grid"
gridTemplateColumns={`250px repeat(${variants.data.length}, minmax(300px, 1fr)) auto`}
sx={{
"> *": {
borderColor: "gray.300",
},
}}
fontSize="sm"
>
<GridItem rowSpan={variantHeaderRows}>
<AddVariantButton />
</GridItem>
{variants.data.map((variant, i) => {
const sharedProps: GridItemProps = {
...borders,
colStart: i + 2,
borderLeftWidth: i === 0 ? 1 : 0,
marginLeft: i === 0 ? "-1px" : 0,
backgroundColor: "gray.100",
};
return (
<Fragment key={variant.uiId}>
<VariantHeader
variant={variant}
canHide={variants.data.length > 1}
rowStart={1}
{...sharedProps}
/>
<GridItem rowStart={2} {...sharedProps}>
<VariantEditor variant={variant} />
</GridItem>
<GridItem rowStart={3} {...sharedProps}>
<VariantStats variant={variant} />
</GridItem>
</Fragment>
);
})}
<GridItem
colSpan={allCols - 1}
rowStart={variantHeaderRows + 1}
colStart={1}
{...borders}
borderRightWidth={0}
>
<ScenariosHeader />
</GridItem>
{scenarios.data.scenarios.map((scenario, i) => (
<ScenarioRow
rowStart={i + variantHeaderRows + scenarioHeaderRows + 2}
key={scenario.uiId}
scenario={scenario}
variants={variants.data}
canHide={visibleScenariosCount > 1}
/>
))}
<GridItem
rowStart={variantHeaderRows + scenarioHeaderRows + visibleScenariosCount + 2}
colStart={1}
colSpan={allCols}
>
<ScenarioPaginator />
</GridItem>
{/* Add some extra padding on the right, because when the table is too wide to fit in the viewport `pr` on the Grid isn't respected. */}
<GridItem rowStart={1} colStart={allCols} rowSpan={allRows} w={4} borderBottomWidth={0} />
</Grid>
);
}

View File

@@ -1,6 +0,0 @@
import { type GridItemProps } from "@chakra-ui/react";
export const borders: GridItemProps = {
borderRightWidth: 1,
borderBottomWidth: 1,
};

View File

@@ -1,57 +0,0 @@
import {
Button,
AlertDialog,
AlertDialogBody,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogContent,
AlertDialogOverlay,
} from "@chakra-ui/react";
import { useRouter } from "next/router";
import { useRef } from "react";
import { api } from "~/utils/api";
import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
export const DeleteDialog = ({ onClose }: { onClose: () => void }) => {
const experiment = useExperiment();
const deleteMutation = api.experiments.delete.useMutation();
const utils = api.useContext();
const router = useRouter();
const cancelRef = useRef<HTMLButtonElement>(null);
const [onDeleteConfirm] = useHandledAsyncCallback(async () => {
if (!experiment.data?.id) return;
await deleteMutation.mutateAsync({ id: experiment.data.id });
await utils.experiments.list.invalidate();
await router.push({ pathname: "/experiments" });
onClose();
}, [deleteMutation, experiment.data?.id, router]);
return (
<AlertDialog isOpen leastDestructiveRef={cancelRef} onClose={onClose}>
<AlertDialogOverlay>
<AlertDialogContent>
<AlertDialogHeader fontSize="lg" fontWeight="bold">
Delete Experiment
</AlertDialogHeader>
<AlertDialogBody>
If you delete this experiment, all the associated prompts and scenarios will be deleted
as well. Are you sure?
</AlertDialogBody>
<AlertDialogFooter>
<Button ref={cancelRef} onClick={onClose}>
Cancel
</Button>
<Button colorScheme="red" onClick={onDeleteConfirm} ml={3}>
Delete
</Button>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialogOverlay>
</AlertDialog>
);
};

View File

@@ -1,42 +0,0 @@
import { Button, HStack, Icon, Spinner, Text } from "@chakra-ui/react";
import { useOnForkButtonPressed } from "./useOnForkButtonPressed";
import { useExperiment } from "~/utils/hooks";
import { BsGearFill } from "react-icons/bs";
import { TbGitFork } from "react-icons/tb";
import { useAppStore } from "~/state/store";
export const HeaderButtons = () => {
const experiment = useExperiment();
const canModify = experiment.data?.access.canModify ?? false;
const { onForkButtonPressed, isForking } = useOnForkButtonPressed();
const openDrawer = useAppStore((s) => s.openDrawer);
if (experiment.isLoading) return null;
return (
<HStack spacing={0} mt={{ base: 2, md: 0 }}>
<Button
onClick={onForkButtonPressed}
mr={4}
colorScheme={canModify ? undefined : "orange"}
bgColor={canModify ? undefined : "orange.400"}
minW={0}
variant={{ base: "solid", md: canModify ? "ghost" : "solid" }}
>
{isForking ? <Spinner boxSize={5} /> : <Icon as={TbGitFork} boxSize={5} />}
<Text ml={2}>Fork</Text>
</Button>
{canModify && (
<Button variant={{ base: "solid", md: "ghost" }} onClick={openDrawer}>
<HStack>
<Icon as={BsGearFill} />
<Text>Settings</Text>
</HStack>
</Button>
)}
</HStack>
);
};

View File

@@ -1,30 +0,0 @@
import { useCallback } from "react";
import { api } from "~/utils/api";
import { useExperiment, useHandledAsyncCallback } from "~/utils/hooks";
import { signIn, useSession } from "next-auth/react";
import { useRouter } from "next/router";
export const useOnForkButtonPressed = () => {
const router = useRouter();
const user = useSession().data;
const experiment = useExperiment();
const forkMutation = api.experiments.fork.useMutation();
const [onFork, isForking] = useHandledAsyncCallback(async () => {
if (!experiment.data?.id) return;
const forkedExperimentId = await forkMutation.mutateAsync({ id: experiment.data.id });
await router.push({ pathname: "/experiments/[id]", query: { id: forkedExperimentId } });
}, [forkMutation, experiment.data?.id, router]);
const onForkButtonPressed = useCallback(() => {
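// next-auth reports a signed-out visitor as `null` session data, so route
// them through GitHub sign-in before attempting the fork.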
if (user === null) {
signIn("github").catch(console.error);
} else {
onFork();
}
}, [onFork, user]);
return { onForkButtonPressed, isForking };
};

View File

@@ -1,129 +0,0 @@
{
"type": "object",
"properties": {
"model": {
"description": "The model that will complete your prompt.\nAs we improve Claude, we develop new versions of it that you can query.\nThis parameter controls which version of Claude answers your request.\nRight now we are offering two model families: Claude, and Claude Instant.\nYou can use them by setting model to \"claude-2\" or \"claude-instant-1\", respectively.\nSee models for additional details.\n",
"x-oaiTypeLabel": "string",
"type": "string",
"enum": [
"claude-2",
"claude-2.0",
"claude-instant-1",
"claude-instant-1.1"
]
},
"prompt": {
"description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\\n\\nHuman: ${userQuestion}\\n\\nAssistant:\nSee our comments on prompts for more context.\n",
"default": "<|endoftext|>",
"nullable": true,
"oneOf": [
{
"type": "string",
"default": "",
"example": "This is a test."
},
{
"type": "array",
"items": {
"type": "string",
"default": "",
"example": "This is a test."
}
},
{
"type": "array",
"minItems": 1,
"items": {
"type": "integer"
},
"example": "[1212, 318, 257, 1332, 13]"
},
{
"type": "array",
"minItems": 1,
"items": {
"type": "array",
"minItems": 1,
"items": {
"type": "integer"
}
},
"example": "[[1212, 318, 257, 1332, 13]]"
}
]
},
"max_tokens_to_sample": {
"type": "integer",
"minimum": 1,
"default": 256,
"example": 256,
"nullable": true,
"description": "The maximum number of tokens to generate before stopping.\n\nNote that our models may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate.\n"
},
"temperature": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
"example": 1,
"nullable": true,
"description": "Amount of randomness injected into the response.\n\nDefaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks.\n"
},
"top_p": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
"example": 1,
"nullable": true,
"description": "Use nucleus sampling.\n\nIn nucleus sampling, we compute the cumulative distribution over all the options \nfor each subsequent token in decreasing probability order and cut it off once \nit reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both.\n"
},
"top_k": {
"type": "number",
"minimum": 0,
"default": 5,
"example": 5,
"nullable": true,
"description": "Only sample from the top K options for each subsequent token.\n\nUsed to remove \"long tail\" low probability responses. Learn more technical details here.\n"
},
"stream": {
"description": "Whether to incrementally stream the response using server-sent events.\nSee this guide to SSE events for details.type: boolean\n",
"nullable": true,
"default": false
},
"stop_sequences": {
"description": "Sequences that will cause the model to stop generating completion text.\nOur models stop on \"\\n\\nHuman:\", and may include additional built-in stop sequences in the future. By providing the stop_sequences parameter, you may include additional strings that will cause the model to stop generating.\n",
"default": null,
"nullable": true,
"oneOf": [
{
"type": "string",
"default": "<|endoftext|>",
"example": "\n",
"nullable": true
},
{
"type": "array",
"minItems": 1,
"maxItems": 4,
"items": {
"type": "string",
"example": "[\"\\n\"]"
}
}
]
},
"metadata": {
"type": "object",
"properties": {
"user_id": {
"type": "string",
"example": "13803d75-b4b5-4c3e-b2a2-6f21399b021b",
"description": "An external identifier for the user who is associated with the request.\n\nThis should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. \nDo not include any identifying information such as name, email address, or phone number.\n"
}
},
"description": "An object describing metadata about the request.\n"
}
},
"required": ["model", "prompt", "max_tokens_to_sample"]
}

View File

@@ -1,40 +0,0 @@
import { type Completion } from "@anthropic-ai/sdk/resources";
import { type SupportedModel } from ".";
import { type FrontendModelProvider } from "../types";
import { refinementActions } from "./refinementActions";
const frontendModelProvider: FrontendModelProvider<SupportedModel, Completion> = {
name: "Replicate Llama2",
models: {
"claude-2.0": {
name: "Claude 2.0",
contextWindow: 100000,
promptTokenPrice: 11.02 / 1000000,
completionTokenPrice: 32.68 / 1000000,
speed: "medium",
provider: "anthropic",
learnMoreUrl: "https://www.anthropic.com/product",
},
"claude-instant-1.1": {
name: "Claude Instant 1.1",
contextWindow: 100000,
promptTokenPrice: 1.63 / 1000000,
completionTokenPrice: 5.51 / 1000000,
speed: "fast",
provider: "anthropic",
learnMoreUrl: "https://www.anthropic.com/product",
},
},
refinementActions,
normalizeOutput: (output) => {
return {
type: "text",
value: output.completion,
};
},
};
export default frontendModelProvider;

View File

@@ -1,86 +0,0 @@
import { env } from "~/env.mjs";
import { type CompletionResponse } from "../types";
import Anthropic, { APIError } from "@anthropic-ai/sdk";
import { type Completion, type CompletionCreateParams } from "@anthropic-ai/sdk/resources";
import { isObject, isString } from "lodash-es";
const anthropic = new Anthropic({
apiKey: env.ANTHROPIC_API_KEY,
});
export async function getCompletion(
input: CompletionCreateParams,
onStream: ((partialOutput: Completion) => void) | null,
): Promise<CompletionResponse<Completion>> {
const start = Date.now();
let finalCompletion: Completion | null = null;
try {
if (onStream) {
const resp = await anthropic.completions.create(
{ ...input, stream: true },
{
maxRetries: 0,
},
);
for await (const part of resp) {
if (finalCompletion === null) {
finalCompletion = part;
} else {
finalCompletion = { ...part, completion: finalCompletion.completion + part.completion };
}
onStream(finalCompletion);
}
if (!finalCompletion) {
return {
type: "error",
message: "Streaming failed to return a completion",
autoRetry: false,
};
}
} else {
const resp = await anthropic.completions.create(
{ ...input, stream: false },
{
maxRetries: 0,
},
);
finalCompletion = resp;
}
const timeToComplete = Date.now() - start;
return {
type: "success",
statusCode: 200,
value: finalCompletion,
timeToComplete,
};
} catch (error: unknown) {
console.log("CAUGHT ERROR", error);
if (error instanceof APIError) {
const message =
isObject(error.error) &&
"error" in error.error &&
isObject(error.error.error) &&
"message" in error.error.error &&
isString(error.error.error.message)
? error.error.error.message
: error.message;
return {
type: "error",
message,
autoRetry: error.status === 429 || error.status === 503,
statusCode: error.status,
};
} else {
return {
type: "error",
message: (error as Error).message,
autoRetry: true,
};
}
}
}
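For illustration, a hedged sketch of how a caller might drive this function in streaming mode — the prompt and logging here are hypothetical, but the call matches the signature above:

```ts
// Stream a Claude completion; getCompletion accumulates the streamed chunks
// into one growing Completion and invokes the callback with each snapshot.
const response = await getCompletion(
  {
    model: "claude-2.0",
    prompt: "\n\nHuman: Say hello.\n\nAssistant:",
    max_tokens_to_sample: 256,
  },
  (partial) => console.log(partial.completion), // text received so far
);
if (response.type === "success") console.log(response.value.completion);
```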

View File

@@ -1,34 +0,0 @@
import { type JSONSchema4 } from "json-schema";
import { type ModelProvider } from "../types";
import inputSchema from "./codegen/input.schema.json";
import { getCompletion } from "./getCompletion";
import frontendModelProvider from "./frontend";
import type { Completion, CompletionCreateParams } from "@anthropic-ai/sdk/resources";
const supportedModels = ["claude-2.0", "claude-instant-1.1"] as const;
export type SupportedModel = (typeof supportedModels)[number];
export type AnthropicProvider = ModelProvider<SupportedModel, CompletionCreateParams, Completion>;
const modelProvider: AnthropicProvider = {
getModel: (input) => {
if (supportedModels.includes(input.model as SupportedModel))
return input.model as SupportedModel;
const modelMaps: Record<string, SupportedModel> = {
"claude-2": "claude-2.0",
"claude-instant-1": "claude-instant-1.1",
};
if (input.model in modelMaps) return modelMaps[input.model] as SupportedModel;
return null;
},
inputSchema: inputSchema as JSONSchema4,
canStream: true,
getCompletion,
...frontendModelProvider,
};
export default modelProvider;
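A quick usage sketch of the alias handling in `getModel` — exact version ids pass through, while the rolling `claude-2` / `claude-instant-1` aliases are pinned to concrete versions (the surrounding input fields are placeholders):

```ts
const base = { prompt: "\n\nHuman: hi\n\nAssistant:", max_tokens_to_sample: 1 };

modelProvider.getModel({ ...base, model: "claude-2.0" }); // "claude-2.0" — exact match
modelProvider.getModel({ ...base, model: "claude-2" }); // "claude-2.0" — pinned via modelMaps
modelProvider.getModel({ ...base, model: "text-davinci-003" }); // null — not an Anthropic model
```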

View File

@@ -1,3 +0,0 @@
import { type RefinementAction } from "../types";
export const refinementActions: Record<string, RefinementAction> = {};

View File

@@ -1,15 +0,0 @@
import openaiChatCompletionFrontend from "./openai-ChatCompletion/frontend";
import replicateLlama2Frontend from "./replicate-llama2/frontend";
import anthropicFrontend from "./anthropic/frontend";
import { type SupportedProvider, type FrontendModelProvider } from "./types";
// Keep attributes here that need to be accessible from the frontend. We can't
// just include them in the default `modelProviders` object because it has some
// transient dependencies that can only be imported on the server.
const frontendModelProviders: Record<SupportedProvider, FrontendModelProvider<any, any>> = {
"openai/ChatCompletion": openaiChatCompletionFrontend,
"replicate/llama2": replicateLlama2Frontend,
anthropic: anthropicFrontend,
};
export default frontendModelProviders;
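This mirrors how frontend components elsewhere in this diff (e.g. `OutputCell`) consume the map: look up the provider record by id, then normalize a raw completion for display. A sketch, with `rawChatCompletion` standing in for a real API response:

```ts
import frontendModelProviders from "~/modelProviders/frontendModelProviders";
import { type ChatCompletion } from "openai/resources/chat";

declare const rawChatCompletion: ChatCompletion; // stand-in for a real API response

const provider = frontendModelProviders["openai/ChatCompletion"];
const normalized = provider.normalizeOutput(rawChatCompletion); // { type: "text" | "json", value: ... }
```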

View File

@@ -1,41 +0,0 @@
import { type JSONSchema4Object } from "json-schema";
import modelProviders from "./modelProviders";
import { compile } from "json-schema-to-typescript";
import dedent from "dedent";
export const declarePromptTypes = async () => {
const promptTypes = (await generatePromptTypes()).replace(
/export interface PromptTypes/g,
"interface PromptTypes",
);
return dedent`
${promptTypes}
declare function definePrompt<T extends keyof PromptTypes>(modelProvider: T, input: PromptTypes[T])
`;
};
export const generatePromptTypes = async () => {
const combinedSchema = {
type: "object",
properties: {} as Record<string, JSONSchema4Object>,
};
Object.entries(modelProviders).forEach(([id, provider]) => {
combinedSchema.properties[id] = provider.inputSchema;
});
return await compile(combinedSchema as JSONSchema4Object, "PromptTypes", {
additionalProperties: false,
bannerComment: dedent`
/**
* This type map defines the input types for each model provider.
*/
`,
});
};
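For reference, a hedged sketch of the kind of declaration `json-schema-to-typescript` emits from the combined schema — the member names come from the provider ids registered above, while each body depends on that provider's input schema:

```ts
/**
 * This type map defines the input types for each model provider.
 */
export interface PromptTypes {
  "openai/ChatCompletion"?: {
    model: string;
    messages: { role: string; content?: string }[];
    // ...remaining fields from the OpenAI input schema
  };
  "replicate/llama2"?: { model: string; prompt: string /* ... */ };
  anthropic?: { model: string; prompt: string; max_tokens_to_sample: number /* ... */ };
}
```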

View File

@@ -1,12 +0,0 @@
import openaiChatCompletion from "./openai-ChatCompletion";
import replicateLlama2 from "./replicate-llama2";
import anthropic from "./anthropic";
import { type SupportedProvider, type ModelProvider } from "./types";
const modelProviders: Record<SupportedProvider, ModelProvider<any, any, any>> = {
"openai/ChatCompletion": openaiChatCompletion,
"replicate/llama2": replicateLlama2,
anthropic,
};
export default modelProviders;

View File

@@ -1,77 +0,0 @@
/* eslint-disable @typescript-eslint/no-var-requires */
import YAML from "yaml";
import fs from "fs";
import path from "path";
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
import $RefParser from "@apidevtools/json-schema-ref-parser";
import { type JSONObject } from "superjson/dist/types";
import assert from "assert";
import { type JSONSchema4Object } from "json-schema";
import { isObject } from "lodash-es";
// @ts-expect-error for some reason missing from types
import parserEstree from "prettier/plugins/estree";
import parserBabel from "prettier/plugins/babel";
import prettier from "prettier/standalone";
const OPENAPI_URL =
"https://raw.githubusercontent.com/openai/openai-openapi/0c432eb66fd0c758fd8b9bd69db41c1096e5f4db/openapi.yaml";
// Fetch the openapi document
const response = await fetch(OPENAPI_URL);
const openApiYaml = await response.text();
// Parse the yaml document
let schema = YAML.parse(openApiYaml) as JSONObject;
schema = openapiSchemaToJsonSchema(schema);
const jsonSchema = await $RefParser.dereference(schema);
assert("components" in jsonSchema);
const completionRequestSchema = jsonSchema.components.schemas
.CreateChatCompletionRequest as JSONSchema4Object;
// We need to do a bit of surgery here since the Monaco editor doesn't like
// the fact that the schema says `model` can be either a string or an enum,
// and displays a warning in the editor. Let's stick with just an enum for
// now and drop the string option.
assert(
"properties" in completionRequestSchema &&
isObject(completionRequestSchema.properties) &&
"model" in completionRequestSchema.properties &&
isObject(completionRequestSchema.properties.model),
);
const modelProperty = completionRequestSchema.properties.model;
assert(
"oneOf" in modelProperty &&
Array.isArray(modelProperty.oneOf) &&
modelProperty.oneOf.length === 2 &&
isObject(modelProperty.oneOf[1]) &&
"enum" in modelProperty.oneOf[1],
"Expected model to have oneOf length of 2",
);
modelProperty.type = "string";
modelProperty.enum = modelProperty.oneOf[1].enum;
delete modelProperty["oneOf"];
// The default of "inf" confuses the Typescript generator, so can just remove it
assert(
"max_tokens" in completionRequestSchema.properties &&
isObject(completionRequestSchema.properties.max_tokens) &&
"default" in completionRequestSchema.properties.max_tokens,
);
delete completionRequestSchema.properties.max_tokens["default"];
// Get the directory of the current script
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
// Write the JSON schema to a file in the current directory
fs.writeFileSync(
path.join(currentDirectory, "input.schema.json"),
await prettier.format(JSON.stringify(completionRequestSchema, null, 2), {
parser: "json",
plugins: [parserBabel, parserEstree],
}),
);
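The net effect of the `model` surgery above, sketched as abridged before/after shapes (the real schema carries many more fields):

```ts
// Before: Monaco warns about the string-or-enum union.
const before = { model: { oneOf: [{ type: "string" }, { type: "string", enum: ["gpt-4", "..."] }] } };
// After: a single enum-typed string property (and max_tokens loses its "inf" default).
const after = { model: { type: "string", enum: ["gpt-4", "..."] } };
```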

View File

@@ -1,87 +0,0 @@
import { type JsonValue } from "type-fest";
import { type SupportedModel } from ".";
import { type FrontendModelProvider } from "../types";
import { type ChatCompletion } from "openai/resources/chat";
import { refinementActions } from "./refinementActions";
const frontendModelProvider: FrontendModelProvider<SupportedModel, ChatCompletion> = {
name: "OpenAI ChatCompletion",
models: {
"gpt-4-0613": {
name: "GPT-4",
contextWindow: 8192,
promptTokenPrice: 0.00003,
completionTokenPrice: 0.00006,
speed: "medium",
provider: "openai/ChatCompletion",
learnMoreUrl: "https://openai.com/gpt-4",
},
"gpt-4-32k-0613": {
name: "GPT-4 32k",
contextWindow: 32768,
promptTokenPrice: 0.00006,
completionTokenPrice: 0.00012,
speed: "medium",
provider: "openai/ChatCompletion",
learnMoreUrl: "https://openai.com/gpt-4",
},
"gpt-3.5-turbo-0613": {
name: "GPT-3.5 Turbo",
contextWindow: 4096,
promptTokenPrice: 0.0000015,
completionTokenPrice: 0.000002,
speed: "fast",
provider: "openai/ChatCompletion",
learnMoreUrl: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
},
"gpt-3.5-turbo-16k-0613": {
name: "GPT-3.5 Turbo 16k",
contextWindow: 16384,
promptTokenPrice: 0.000003,
completionTokenPrice: 0.000004,
speed: "fast",
provider: "openai/ChatCompletion",
learnMoreUrl: "https://platform.openai.com/docs/guides/gpt/chat-completions-api",
},
},
refinementActions,
normalizeOutput: (output) => {
const message = output.choices[0]?.message;
if (!message)
return {
type: "json",
value: output as unknown as JsonValue,
};
if (message.content) {
return {
type: "text",
value: message.content,
};
} else if (message.function_call) {
let args = message.function_call.arguments ?? "";
try {
args = JSON.parse(args);
} catch (e) {
// Ignore
}
return {
type: "json",
value: {
...message.function_call,
arguments: args,
},
};
} else {
return {
type: "json",
value: message as unknown as JsonValue,
};
}
},
};
export default frontendModelProvider;

View File

@@ -1,279 +0,0 @@
import { TfiThought } from "react-icons/tfi";
import { type RefinementAction } from "../types";
import { VscJson } from "react-icons/vsc";
export const refinementActions: Record<string, RefinementAction> = {
"Add chain of thought": {
icon: TfiThought,
description: "Asking the model to plan its answer can increase accuracy.",
instructions: `Adding chain of thought means asking the model to think about its answer before it gives it to you. This is useful for getting more accurate answers. Do not add an assistant message.
This is what a prompt looks like before adding chain of thought:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: \`Evaluate sentiment.\`,
},
{
role: "user",
content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
},
],
});
This is what one looks like after adding chain of thought:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: \`Evaluate sentiment.\`,
},
{
role: "user",
content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral". Explain your answer before you give a score, then return the score on a new line.\`,
},
],
});
Here's another example:
Before:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Title: \${scenario.title}
Body: \${scenario.body}
Need: \${scenario.need}
Rate likelihood on 1-3 scale.\`,
},
],
temperature: 0,
functions: [
{
name: "score_post",
parameters: {
type: "object",
properties: {
score: {
type: "number",
},
},
},
},
],
function_call: {
name: "score_post",
},
});
After:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Title: \${scenario.title}
Body: \${scenario.body}
Need: \${scenario.need}
Rate likelihood on 1-3 scale. Provide an explanation, but always provide a score afterward.\`,
},
],
temperature: 0,
functions: [
{
name: "score_post",
parameters: {
type: "object",
properties: {
explanation: {
type: "string",
},
score: {
type: "number",
},
},
},
},
],
function_call: {
name: "score_post",
},
});
Add chain of thought to the original prompt.`,
},
"Convert to function call": {
icon: VscJson,
description: "Use function calls to get output from the model in a more structured way.",
instructions: `OpenAI functions are a specialized way for an LLM to return output.
This is what a prompt looks like before adding a function:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: \`Evaluate sentiment.\`,
},
{
role: "user",
content: \`This is the user's message: \${scenario.user_message}. Return "positive" or "negative" or "neutral"\`,
},
],
});
This is what one looks like after adding a function:
definePrompt("openai/ChatCompletion", {
model: "gpt-4",
stream: true,
messages: [
{
role: "system",
content: "Evaluate sentiment.",
},
{
role: "user",
content: scenario.user_message,
},
],
functions: [
{
name: "extract_sentiment",
parameters: {
type: "object", // parameters must always be an object with a properties key
properties: { // properties key is required
sentiment: {
type: "string",
description: "one of positive/negative/neutral",
},
},
},
},
],
function_call: {
name: "extract_sentiment",
},
});
Here's another example of adding a function:
Before:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Here is the title and body of a reddit post I am interested in:
title: \${scenario.title}
body: \${scenario.body}
On a scale from 1 to 3, how likely is it that the person writing this post has the following need? If you are not sure, make your best guess, or answer 1.
Need: \${scenario.need}
Answer one integer between 1 and 3.\`,
},
],
temperature: 0,
});
After:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "user",
content: \`Title: \${scenario.title}
Body: \${scenario.body}
Need: \${scenario.need}
Rate likelihood on 1-3 scale.\`,
},
],
temperature: 0,
functions: [
{
name: "score_post",
parameters: {
type: "object",
properties: {
score: {
type: "number",
},
},
},
},
],
function_call: {
name: "score_post",
},
});
Another example
Before:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
stream: true,
messages: [
{
role: "system",
content: \`Write 'Start experimenting!' in \${scenario.language}\`,
},
],
});
After:
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo",
messages: [
{
role: "system",
content: \`Write 'Start experimenting!' in \${scenario.language}\`,
},
],
functions: [
{
name: "write_in_language",
parameters: {
type: "object",
properties: {
text: {
type: "string",
},
},
},
},
],
function_call: {
name: "write_in_language",
},
});
Add an OpenAI function that takes one or more nested parameters that match the expected output from this prompt.`,
},
};

View File

@@ -1,45 +0,0 @@
import { type SupportedModel, type ReplicateLlama2Output } from ".";
import { type FrontendModelProvider } from "../types";
import { refinementActions } from "./refinementActions";
const frontendModelProvider: FrontendModelProvider<SupportedModel, ReplicateLlama2Output> = {
name: "Replicate Llama2",
models: {
"7b-chat": {
name: "LLama 2 7B Chat",
contextWindow: 4096,
pricePerSecond: 0.0023,
speed: "fast",
provider: "replicate/llama2",
learnMoreUrl: "https://replicate.com/a16z-infra/llama7b-v2-chat",
},
"13b-chat": {
name: "LLama 2 13B Chat",
contextWindow: 4096,
pricePerSecond: 0.0023,
speed: "medium",
provider: "replicate/llama2",
learnMoreUrl: "https://replicate.com/a16z-infra/llama13b-v2-chat",
},
"70b-chat": {
name: "LLama 2 70B Chat",
contextWindow: 4096,
pricePerSecond: 0.0032,
speed: "slow",
provider: "replicate/llama2",
learnMoreUrl: "https://replicate.com/replicate/llama70b-v2-chat",
},
},
refinementActions,
normalizeOutput: (output) => {
return {
type: "text",
value: output.join(""),
};
},
};
export default frontendModelProvider;

View File

@@ -1,60 +0,0 @@
import { env } from "~/env.mjs";
import { type ReplicateLlama2Input, type ReplicateLlama2Output } from ".";
import { type CompletionResponse } from "../types";
import Replicate from "replicate";
const replicate = new Replicate({
auth: env.REPLICATE_API_TOKEN || "",
});
const modelIds: Record<ReplicateLlama2Input["model"], string> = {
"7b-chat": "5ec5fdadd80ace49f5a2b2178cceeb9f2f77c493b85b1131002c26e6b2b13184",
"13b-chat": "6b4da803a2382c08868c5af10a523892f38e2de1aafb2ee55b020d9efef2fdb8",
"70b-chat": "2d19859030ff705a87c746f7e96eea03aefb71f166725aee39692f1476566d48",
};
export async function getCompletion(
input: ReplicateLlama2Input,
onStream: ((partialOutput: string[]) => void) | null,
): Promise<CompletionResponse<ReplicateLlama2Output>> {
const start = Date.now();
const { model, ...rest } = input;
try {
const prediction = await replicate.predictions.create({
version: modelIds[model],
input: rest,
});
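// Replicate has no native streaming here, so emulate it: poll the prediction
// every 500ms and forward whatever partial output has accumulated so far.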
const interval = onStream
? // eslint-disable-next-line @typescript-eslint/no-misused-promises
setInterval(async () => {
const partialPrediction = await replicate.predictions.get(prediction.id);
if (partialPrediction.output) onStream(partialPrediction.output as ReplicateLlama2Output);
}, 500)
: null;
const resp = await replicate.wait(prediction, {});
if (interval) clearInterval(interval);
const timeToComplete = Date.now() - start;
if (resp.error) throw new Error(resp.error as string);
return {
type: "success",
statusCode: 200,
value: resp.output as ReplicateLlama2Output,
timeToComplete,
};
} catch (error: unknown) {
console.error("ERROR IS", error);
return {
type: "error",
message: (error as Error).message,
autoRetry: true,
};
}
}

View File

@@ -1,81 +0,0 @@
import { type ModelProvider } from "../types";
import frontendModelProvider from "./frontend";
import { getCompletion } from "./getCompletion";
const supportedModels = ["7b-chat", "13b-chat", "70b-chat"] as const;
export type SupportedModel = (typeof supportedModels)[number];
export type ReplicateLlama2Input = {
model: SupportedModel;
// Aligned with the inputSchema below, which declares system_prompt and
// max_new_tokens; the type previously listed max_length, which the schema
// never offered.
system_prompt?: string;
prompt: string;
max_new_tokens?: number;
temperature?: number;
top_p?: number;
repetition_penalty?: number;
debug?: boolean;
};
export type ReplicateLlama2Output = string[];
export type ReplicateLlama2Provider = ModelProvider<
SupportedModel,
ReplicateLlama2Input,
ReplicateLlama2Output
>;
const modelProvider: ReplicateLlama2Provider = {
getModel: (input) => {
if (supportedModels.includes(input.model)) return input.model;
return null;
},
inputSchema: {
type: "object",
properties: {
model: {
type: "string",
enum: supportedModels as unknown as string[],
},
system_prompt: {
type: "string",
description:
"System prompt to send to Llama v2. This is prepended to the prompt and helps guide system behavior.",
},
prompt: {
type: "string",
description: "Prompt to send to Llama v2.",
},
max_new_tokens: {
type: "number",
description:
"Maximum number of tokens to generate. A word is generally 2-3 tokens (minimum: 1)",
},
temperature: {
type: "number",
description:
"Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value. (minimum: 0.01; maximum: 5)",
},
top_p: {
type: "number",
description:
"When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens (minimum: 0.01; maximum: 1)",
},
repetition_penalty: {
type: "number",
description:
"Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it. (minimum: 0.01; maximum: 5)",
},
debug: {
type: "boolean",
description: "provide debugging output in logs",
},
},
required: ["model", "prompt"],
},
canStream: true,
getCompletion,
...frontendModelProvider,
};
export default modelProvider;

View File

@@ -1,3 +0,0 @@
import { type RefinementAction } from "../types";
export const refinementActions: Record<string, RefinementAction> = {};

View File

@@ -1,71 +0,0 @@
import { type JSONSchema4 } from "json-schema";
import { type IconType } from "react-icons";
import { type JsonValue } from "type-fest";
import { z } from "zod";
export const ZodSupportedProvider = z.union([
z.literal("openai/ChatCompletion"),
z.literal("replicate/llama2"),
z.literal("anthropic"),
]);
export type SupportedProvider = z.infer<typeof ZodSupportedProvider>;
export type Model = {
name: string;
contextWindow: number;
promptTokenPrice?: number;
completionTokenPrice?: number;
pricePerSecond?: number;
speed: "fast" | "medium" | "slow";
provider: SupportedProvider;
description?: string;
learnMoreUrl?: string;
};
export type ProviderModel = { provider: z.infer<typeof ZodSupportedProvider>; model: string };
export type RefinementAction = { icon?: IconType; description: string; instructions: string };
export type FrontendModelProvider<SupportedModels extends string, OutputSchema> = {
name: string;
models: Record<SupportedModels, Model>;
refinementActions?: Record<string, RefinementAction>;
normalizeOutput: (output: OutputSchema) => NormalizedOutput;
};
export type CompletionResponse<T> =
| { type: "error"; message: string; autoRetry: boolean; statusCode?: number }
| {
type: "success";
value: T;
timeToComplete: number;
statusCode: number;
promptTokens?: number;
completionTokens?: number;
cost?: number;
};
export type ModelProvider<SupportedModels extends string, InputSchema, OutputSchema> = {
getModel: (input: InputSchema) => SupportedModels | null;
canStream: boolean;
inputSchema: JSONSchema4;
getCompletion: (
input: InputSchema,
onStream: ((partialOutput: OutputSchema) => void) | null,
) => Promise<CompletionResponse<OutputSchema>>;
// This is just a convenience for type inference, don't use it at runtime
_outputSchema?: OutputSchema | null;
} & FrontendModelProvider<SupportedModels, OutputSchema>;
export type NormalizedOutput =
| {
type: "text";
value: string;
}
| {
type: "json";
value: JsonValue;
};
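To make these contracts concrete, a hedged sketch of a hypothetical provider that satisfies ModelProvider end to end; the "echo" model, its metadata, and the reuse of the "openai/ChatCompletion" provider literal are illustrative assumptions, not part of the codebase:

type EchoInput = { model: "echo"; text: string };
type EchoOutput = string;

const echoProvider: ModelProvider<"echo", EchoInput, EchoOutput> = {
  name: "Echo",
  models: {
    // A real provider would register its own SupportedProvider literal.
    echo: { name: "Echo", contextWindow: 4096, speed: "fast", provider: "openai/ChatCompletion" },
  },
  getModel: (input) => (input.model === "echo" ? "echo" : null),
  canStream: false,
  inputSchema: {
    type: "object",
    properties: { text: { type: "string" } },
    required: ["text"],
  },
  getCompletion: async (input) => ({
    type: "success",
    value: input.text, // simply echo the input back
    timeToComplete: 0,
    statusCode: 200,
  }),
  normalizeOutput: (output) => ({ type: "text", value: output }),
};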

View File

@@ -1,22 +0,0 @@
import { type NextApiRequest, type NextApiResponse } from "next";
import cors from "nextjs-cors";
import { createOpenApiNextHandler } from "trpc-openapi";
import { createProcedureCache } from "trpc-openapi/dist/adapters/node-http/procedures";
import { appRouter } from "~/server/api/root.router";
import { createTRPCContext } from "~/server/api/trpc";
const openApiHandler = createOpenApiNextHandler({
router: appRouter,
createContext: createTRPCContext,
});
const cache = createProcedureCache(appRouter);
const handler = async (req: NextApiRequest, res: NextApiResponse) => {
// Setup CORS
await cors(req, res);
return openApiHandler(req, res);
};
export default handler;

View File

@@ -1,16 +0,0 @@
import { type NextApiRequest, type NextApiResponse } from "next";
import { generateOpenApiDocument } from "trpc-openapi";
import { appRouter } from "~/server/api/root.router";
export const openApiDocument = generateOpenApiDocument(appRouter, {
title: "OpenPipe API",
description: "The public API for reporting API calls to OpenPipe",
version: "0.1.0",
baseUrl: "https://app.openpipe.ai/api",
});
// Respond with our OpenAPI schema
const handler = (req: NextApiRequest, res: NextApiResponse) => {
res.status(200).send(openApiDocument);
};
export default handler;

View File

@@ -1,92 +0,0 @@
import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import {
requireCanAccessDataFlow,
requireNothing,
} from "~/utils/accessControl";
import userOrg from "~/server/utils/userOrg";
export const dataFlowRouter = createTRPCRouter({
list: protectedProcedure.query(async ({ ctx }) => {
// Anyone can list data flows
requireNothing(ctx);
const dataFlows = await prisma.dataFlow.findMany({
where: {
organization: {
organizationUsers: {
some: { userId: ctx.session.user.id },
},
},
},
orderBy: {
createdAt: "desc",
},
});
// TODO: look for cleaner way to do this. Maybe aggregate?
const dataFlowsWithCounts = await Promise.all(
dataFlows.map(async (dataFlow) => {
const loggedCallCount = await prisma.loggedCall.count({
where: {
dataFlowId: dataFlow.id,
},
});
return {
...dataFlow,
loggedCallCount,
};
}),
);
return dataFlowsWithCounts;
}),
get: publicProcedure.input(z.object({ id: z.string() })).query(async ({ input, ctx }) => {
await requireCanAccessDataFlow(input.id, ctx);
return await prisma.dataFlow.findFirstOrThrow({
where: { id: input.id },
});
}),
create: protectedProcedure.input(z.object({})).mutation(async ({ ctx }) => {
// Anyone can create a data flow
requireNothing(ctx);
return await prisma.dataFlow.create({
data: {
label: `Untitled Data Flow`,
organizationId: (await userOrg(ctx.session.user.id)).id,
},
});
}),
update: protectedProcedure
.input(z.object({ id: z.string(), updates: z.object({ label: z.string() }) }))
.mutation(async ({ input, ctx }) => {
await requireCanAccessDataFlow(input.id, ctx);
return await prisma.dataFlow.update({
where: {
id: input.id,
},
data: {
label: input.updates.label,
},
});
}),
delete: protectedProcedure
.input(z.object({ id: z.string() }))
.mutation(async ({ input, ctx }) => {
await requireCanAccessDataFlow(input.id, ctx);
await prisma.dataFlow.delete({
where: {
id: input.id,
},
});
}),
});
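One way to resolve the TODO above is to collapse the per-row count queries into a single aggregation; a hedged, untested sketch using Prisma's groupBy:

// Inside `list`, replacing the Promise.all of count queries:
const counts = await prisma.loggedCall.groupBy({
  by: ["dataFlowId"],
  _count: { _all: true },
  where: { dataFlowId: { in: dataFlows.map((f) => f.id) } },
});
const countById = new Map(counts.map((c) => [c.dataFlowId, c._count._all] as const));
const dataFlowsWithCounts = dataFlows.map((dataFlow) => ({
  ...dataFlow,
  loggedCallCount: countById.get(dataFlow.id) ?? 0,
}));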

View File

@@ -1,388 +0,0 @@
import { z } from "zod";
import { v4 as uuidv4 } from "uuid";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { type Prisma } from "@prisma/client";
import { prisma } from "~/server/db";
import dedent from "dedent";
import { generateNewCell } from "~/server/utils/generateNewCell";
import {
canModifyExperiment,
requireCanModifyExperiment,
requireCanViewExperiment,
requireNothing,
} from "~/utils/accessControl";
import userOrg from "~/server/utils/userOrg";
import { declarePromptTypes } from "~/modelProviders/generatePromptTypes";
export const experimentsRouter = createTRPCRouter({
list: protectedProcedure.query(async ({ ctx }) => {
// Anyone can list experiments
requireNothing(ctx);
const experiments = await prisma.experiment.findMany({
where: {
organization: {
organizationUsers: {
some: { userId: ctx.session.user.id },
},
},
},
orderBy: {
sortIndex: "desc",
},
});
// TODO: look for cleaner way to do this. Maybe aggregate?
const experimentsWithCounts = await Promise.all(
experiments.map(async (experiment) => {
const visibleTestScenarioCount = await prisma.testScenario.count({
where: {
experimentId: experiment.id,
visible: true,
},
});
const visiblePromptVariantCount = await prisma.promptVariant.count({
where: {
experimentId: experiment.id,
visible: true,
},
});
return {
...experiment,
testScenarioCount: visibleTestScenarioCount,
promptVariantCount: visiblePromptVariantCount,
};
}),
);
return experimentsWithCounts;
}),
get: publicProcedure.input(z.object({ id: z.string() })).query(async ({ input, ctx }) => {
await requireCanViewExperiment(input.id, ctx);
const experiment = await prisma.experiment.findFirstOrThrow({
where: { id: input.id },
});
const canModify = ctx.session?.user.id
? await canModifyExperiment(experiment.id, ctx.session?.user.id)
: false;
return {
...experiment,
access: {
canView: true,
canModify,
},
};
}),
fork: protectedProcedure.input(z.object({ id: z.string() })).mutation(async ({ input, ctx }) => {
await requireCanViewExperiment(input.id, ctx);
const [
existingExp,
existingVariants,
existingScenarios,
existingCells,
evaluations,
templateVariables,
] = await prisma.$transaction([
prisma.experiment.findUniqueOrThrow({
where: {
id: input.id,
},
}),
prisma.promptVariant.findMany({
where: {
experimentId: input.id,
visible: true,
},
}),
prisma.testScenario.findMany({
where: {
experimentId: input.id,
visible: true,
},
}),
prisma.scenarioVariantCell.findMany({
where: {
testScenario: {
visible: true,
},
promptVariant: {
experimentId: input.id,
visible: true,
},
},
include: {
modelResponses: {
include: {
outputEvaluations: true,
},
},
},
}),
prisma.evaluation.findMany({
where: {
experimentId: input.id,
},
}),
prisma.templateVariable.findMany({
where: {
experimentId: input.id,
},
}),
]);
const newExperimentId = uuidv4();
const existingToNewVariantIds = new Map<string, string>();
const variantsToCreate: Prisma.PromptVariantCreateManyInput[] = [];
for (const variant of existingVariants) {
const newVariantId = uuidv4();
existingToNewVariantIds.set(variant.id, newVariantId);
variantsToCreate.push({
...variant,
id: newVariantId,
experimentId: newExperimentId,
});
}
const existingToNewScenarioIds = new Map<string, string>();
const scenariosToCreate: Prisma.TestScenarioCreateManyInput[] = [];
for (const scenario of existingScenarios) {
const newScenarioId = uuidv4();
existingToNewScenarioIds.set(scenario.id, newScenarioId);
scenariosToCreate.push({
...scenario,
id: newScenarioId,
experimentId: newExperimentId,
variableValues: scenario.variableValues as Prisma.InputJsonValue,
});
}
const existingToNewEvaluationIds = new Map<string, string>();
const evaluationsToCreate: Prisma.EvaluationCreateManyInput[] = [];
for (const evaluation of evaluations) {
const newEvaluationId = uuidv4();
existingToNewEvaluationIds.set(evaluation.id, newEvaluationId);
evaluationsToCreate.push({
...evaluation,
id: newEvaluationId,
experimentId: newExperimentId,
});
}
const cellsToCreate: Prisma.ScenarioVariantCellCreateManyInput[] = [];
const modelResponsesToCreate: Prisma.ModelResponseCreateManyInput[] = [];
const outputEvaluationsToCreate: Prisma.OutputEvaluationCreateManyInput[] = [];
for (const cell of existingCells) {
const newCellId = uuidv4();
const { modelResponses, ...cellData } = cell;
cellsToCreate.push({
...cellData,
id: newCellId,
promptVariantId: existingToNewVariantIds.get(cell.promptVariantId) ?? "",
testScenarioId: existingToNewScenarioIds.get(cell.testScenarioId) ?? "",
prompt: (cell.prompt as Prisma.InputJsonValue) ?? undefined,
});
for (const modelResponse of modelResponses) {
const newModelResponseId = uuidv4();
const { outputEvaluations, ...modelResponseData } = modelResponse;
modelResponsesToCreate.push({
...modelResponseData,
id: newModelResponseId,
scenarioVariantCellId: newCellId,
output: (modelResponse.output as Prisma.InputJsonValue) ?? undefined,
});
for (const evaluation of outputEvaluations) {
outputEvaluationsToCreate.push({
...evaluation,
id: uuidv4(),
modelResponseId: newModelResponseId,
evaluationId: existingToNewEvaluationIds.get(evaluation.evaluationId) ?? "",
});
}
}
}
const templateVariablesToCreate: Prisma.TemplateVariableCreateManyInput[] = [];
for (const templateVariable of templateVariables) {
templateVariablesToCreate.push({
...templateVariable,
id: uuidv4(),
experimentId: newExperimentId,
});
}
const maxSortIndex =
(
await prisma.experiment.aggregate({
_max: {
sortIndex: true,
},
})
)._max?.sortIndex ?? 0;
await prisma.$transaction([
prisma.experiment.create({
data: {
id: newExperimentId,
sortIndex: maxSortIndex + 1,
label: `${existingExp.label} (forked)`,
organizationId: (await userOrg(ctx.session.user.id)).id,
},
}),
prisma.promptVariant.createMany({
data: variantsToCreate,
}),
prisma.testScenario.createMany({
data: scenariosToCreate,
}),
prisma.scenarioVariantCell.createMany({
data: cellsToCreate,
}),
prisma.modelResponse.createMany({
data: modelResponsesToCreate,
}),
prisma.evaluation.createMany({
data: evaluationsToCreate,
}),
prisma.outputEvaluation.createMany({
data: outputEvaluationsToCreate,
}),
prisma.templateVariable.createMany({
data: templateVariablesToCreate,
}),
]);
return newExperimentId;
}),
create: protectedProcedure.input(z.object({})).mutation(async ({ ctx }) => {
// Anyone can create an experiment
requireNothing(ctx);
const maxSortIndex =
(
await prisma.experiment.aggregate({
_max: {
sortIndex: true,
},
})
)._max?.sortIndex ?? 0;
const exp = await prisma.experiment.create({
data: {
sortIndex: maxSortIndex + 1,
label: `Experiment ${maxSortIndex + 1}`,
organizationId: (await userOrg(ctx.session.user.id)).id,
},
});
const [variant, _, scenario1, scenario2, scenario3] = await prisma.$transaction([
prisma.promptVariant.create({
data: {
experimentId: exp.id,
label: "Prompt Variant 1",
sortIndex: 0,
// The interpolated $ is necessary until dedent incorporates
// https://github.com/dmnd/dedent/pull/46
constructFn: dedent`
/**
* Use Javascript to define an OpenAI chat completion
* (https://platform.openai.com/docs/api-reference/chat/create).
*
* You have access to the current scenario in the \`scenario\`
* variable.
*/
definePrompt("openai/ChatCompletion", {
model: "gpt-3.5-turbo-0613",
stream: true,
messages: [
{
role: "system",
content: \`Write 'Start experimenting!' in ${"$"}{scenario.language}\`,
},
],
});`,
model: "gpt-3.5-turbo-0613",
modelProvider: "openai/ChatCompletion",
constructFnVersion: 2,
},
}),
prisma.templateVariable.create({
data: {
experimentId: exp.id,
label: "language",
},
}),
prisma.testScenario.create({
data: {
experimentId: exp.id,
variableValues: {
language: "English",
},
},
}),
prisma.testScenario.create({
data: {
experimentId: exp.id,
variableValues: {
language: "Spanish",
},
},
}),
prisma.testScenario.create({
data: {
experimentId: exp.id,
variableValues: {
language: "German",
},
},
}),
]);
await generateNewCell(variant.id, scenario1.id);
await generateNewCell(variant.id, scenario2.id);
await generateNewCell(variant.id, scenario3.id);
return exp;
}),
update: protectedProcedure
.input(z.object({ id: z.string(), updates: z.object({ label: z.string() }) }))
.mutation(async ({ input, ctx }) => {
await requireCanModifyExperiment(input.id, ctx);
return await prisma.experiment.update({
where: {
id: input.id,
},
data: {
label: input.updates.label,
},
});
}),
delete: protectedProcedure
.input(z.object({ id: z.string() }))
.mutation(async ({ input, ctx }) => {
await requireCanModifyExperiment(input.id, ctx);
await prisma.experiment.delete({
where: {
id: input.id,
},
});
}),
// Keeping these on `experiment` for now because we might want to limit the
// providers based on your account/experiment
promptTypes: publicProcedure.query(async () => {
return await declarePromptTypes();
}),
});
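The fork mutation above repeats one remapping pattern per table: mint a fresh UUID for each row, record the old-to-new mapping, then rewrite foreign keys through that map before createMany. Distilled as a sketch (not source code):

const remapIds = <T extends { id: string }>(rows: T[]) => {
  const idMap = new Map<string, string>();
  const copies = rows.map((row) => {
    const newId = uuidv4();
    idMap.set(row.id, newId);
    return { ...row, id: newId };
  });
  return { idMap, copies };
};
// Child rows then point at the copies via idMap.get(oldForeignKey) ?? "".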

View File

@@ -1,113 +0,0 @@
import { type Prisma } from "@prisma/client";
import { type JsonObject } from "type-fest";
import { z } from "zod";
import modelProviders from "~/modelProviders/modelProviders";
import { type SupportedProvider } from "~/modelProviders/types";
import { createTRPCRouter, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import hashSortedObject, { hashString } from "~/server/utils/hashSortedObject";
export const externalApiRouter = createTRPCRouter({
capturePrompt: publicProcedure
.meta({
openapi: {
method: "POST",
path: "/v1/capture-prompt",
description: "Capture a prompt with its variables call",
},
})
.input(
z.object({
apiKey: z.string().describe("API token for authentication"),
dataFlowId: z.string().describe("Data flow ID"),
promptFunction: z.string().describe("Prompt construction function"),
scenarioVariables: z.unknown().describe("Scenario variables as JSON"),
prompt: z.unknown().describe("Prompt object as JSON"),
model: z.string().describe("Model name"),
modelProvider: z.string().describe("Model provider"),
}),
)
.output(z.string())
.mutation(async ({ input }) => {
const apiKey = await prisma.apiKey.findUnique({
where: { key: input.apiKey },
});
if (!apiKey) {
throw new Error("Invalid API token");
}
const dataFlow = await prisma.dataFlow.findUnique({
where: { id: input.dataFlowId },
});
if (!dataFlow) {
throw new Error("Invalid data flow ID");
}
const loggedCall = await prisma.loggedCall.create({
data: {
dataFlowId: input.dataFlowId,
promptFunctionHash: hashString(input.promptFunction),
// Pass the constructor function and the parsed values here since
// we want to let the user change the constructor function for all scenarios
promptFunction: input.promptFunction,
scenarioVariables: input.scenarioVariables as Prisma.InputJsonValue,
model: input.model,
modelProvider: input.modelProvider,
prompt: input.prompt as Prisma.InputJsonValue,
},
});
return loggedCall.id;
}),
captureResponse: publicProcedure
.meta({
openapi: {
method: "POST",
path: "/v1/capture-response",
description: "Capture a response to a prompt",
},
})
.input(
z.object({
apiKey: z.string().describe("API token for authentication"),
loggedCallId: z.string().describe("Logged call ID"),
responsePayload: z.unknown().describe("JSON-encoded response payload"),
}),
)
.output(z.void())
.mutation(async ({ input }) => {
const apiKey = await prisma.apiKey.findUnique({
where: { key: input.apiKey },
});
if (!apiKey) {
throw new Error("Invalid API token");
}
const loggedCall = await prisma.loggedCall.findUnique({
where: { id: input.loggedCallId },
});
if (!loggedCall) {
throw new Error("Invalid call log ID");
}
await prisma.loggedCall.update({
where: { id: input.loggedCallId },
data: {
responsePayload: input.responsePayload as Prisma.InputJsonValue,
},
});
const modelProvider = modelProviders[loggedCall.modelProvider as SupportedProvider];
const inputHash = hashSortedObject(loggedCall.prompt as JsonObject);
await prisma.modelResponse.create({
data: {
inputHash,
loggedCallId: input.loggedCallId,
output: modelProvider.normalizeOutput(input.responsePayload),
// TODO: get more accurate time values
requestedAt: loggedCall.createdAt,
receivedAt: new Date(),
statusCode: 200,
},
});
}),
});
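Because these procedures carry openapi metadata, they are reachable over plain HTTP; a hedged usage sketch with placeholder credentials (the base URL matches the OpenAPI document above):

const baseUrl = "https://app.openpipe.ai/api";

// capture-prompt returns the new logged call ID as a JSON string.
const loggedCallId: string = await fetch(`${baseUrl}/v1/capture-prompt`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    apiKey: "MY_API_KEY", // placeholder
    dataFlowId: "MY_DATA_FLOW_ID", // placeholder
    promptFunction: "definePrompt(...)",
    scenarioVariables: { language: "English" },
    prompt: { model: "gpt-3.5-turbo", messages: [] },
    model: "gpt-3.5-turbo",
    modelProvider: "openai/ChatCompletion",
  }),
}).then((r) => r.json());

await fetch(`${baseUrl}/v1/capture-response`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ apiKey: "MY_API_KEY", loggedCallId, responsePayload: {} }),
});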

View File

@@ -1,65 +0,0 @@
import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { requireCanAccessDataFlow } from "~/utils/accessControl";
export const loggedCallRouter = createTRPCRouter({
list: protectedProcedure
.input(z.object({ dataFlowId: z.string() }))
.query(async ({ input, ctx }) => {
await requireCanAccessDataFlow(input.dataFlowId, ctx);
return await prisma.loggedCall.findMany({
where: {
dataFlowId: input.dataFlowId,
},
include: {
modelResponse: true,
},
orderBy: {
createdAt: "desc",
},
});
}),
get: publicProcedure.input(z.object({ id: z.string() })).query(async ({ input, ctx }) => {
const loggedCall = await prisma.loggedCall.findFirst({
where: {
id: input.id,
},
include: {
modelResponse: true,
},
});
if (!loggedCall) {
throw new Error("Logged call not found");
}
await requireCanAccessDataFlow(loggedCall.dataFlowId, ctx);
return loggedCall;
}),
delete: protectedProcedure
.input(z.object({ id: z.string() }))
.mutation(async ({ input, ctx }) => {
const loggedCall = await prisma.loggedCall.findFirst({
where: {
id: input.id,
},
});
if (!loggedCall) {
throw new Error("Logged call not found");
}
await requireCanAccessDataFlow(loggedCall.dataFlowId, ctx);
await prisma.loggedCall.delete({
where: {
id: input.id,
},
});
}),
});

View File

@@ -1,26 +0,0 @@
/* eslint-disable */
import "dotenv/config";
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN || "",
});
console.log("going to run");
const prediction = await replicate.predictions.create({
version: "3725a659b5afff1a0ba9bead5fac3899d998feaad00e07032ca2b0e35eb14f8a",
input: {
prompt: "...",
},
});
console.log("waiting");
setInterval(() => {
replicate.predictions.get(prediction.id).then((prediction) => {
console.log(prediction);
});
}, 500);
// const output = await replicate.wait(prediction, {});
// console.log(output);

View File

@@ -1,185 +0,0 @@
import { type Prisma } from "@prisma/client";
import { type JsonObject } from "type-fest";
import modelProviders from "~/modelProviders/modelProviders";
import { prisma } from "~/server/db";
import { wsConnection } from "~/utils/wsConnection";
import { runEvalsForOutput } from "../utils/evaluations";
import hashPrompt from "../utils/hashSortedObject";
import parseConstructFn from "../utils/parseConstructFn";
import defineTask from "./defineTask";
export type QueryModelJob = {
cellId: string;
stream: boolean;
numPreviousTries: number;
};
const MAX_AUTO_RETRIES = 50;
const MIN_DELAY = 500; // milliseconds
const MAX_DELAY = 15000; // milliseconds
function calculateDelay(numPreviousTries: number): number {
const baseDelay = Math.min(MAX_DELAY, MIN_DELAY * Math.pow(2, numPreviousTries));
const jitter = Math.random() * baseDelay;
return baseDelay + jitter;
}
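// Worked example of the backoff above: with MIN_DELAY 500ms and MAX_DELAY 15s,
// try 0 waits 0.5-1s, try 3 waits 4-8s, and from try 5 on the base is capped
// at 15s; note the jitter is added on top of the cap, so a late retry can
// wait up to 30s.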
export const queryModel = defineTask<QueryModelJob>("queryModel", async (task) => {
console.log("RUNNING TASK", task);
const { cellId, stream, numPreviousTries } = task;
const cell = await prisma.scenarioVariantCell.findUnique({
where: { id: cellId },
include: { modelResponses: true },
});
if (!cell) {
return;
}
// If cell is not pending, then some other job is already processing it
if (cell.retrievalStatus !== "PENDING") {
return;
}
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
retrievalStatus: "IN_PROGRESS",
jobStartedAt: new Date(),
},
});
const variant = await prisma.promptVariant.findUnique({
where: { id: cell.promptVariantId },
});
if (!variant) {
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
errorMessage: "Prompt Variant not found",
retrievalStatus: "ERROR",
},
});
return;
}
const scenario = await prisma.testScenario.findUnique({
where: { id: cell.testScenarioId },
});
if (!scenario) {
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
errorMessage: "Scenario not found",
retrievalStatus: "ERROR",
},
});
return;
}
const prompt = await parseConstructFn(variant.constructFn, scenario.variableValues as JsonObject);
if ("error" in prompt) {
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
errorMessage: prompt.error,
retrievalStatus: "ERROR",
},
});
return;
}
const provider = modelProviders[prompt.modelProvider];
const onStream = stream
? (partialOutput: (typeof provider)["_outputSchema"]) => {
wsConnection.emit("message", { channel: cell.id, payload: partialOutput });
}
: null;
const inputHash = hashPrompt(prompt);
let modelResponse = await prisma.modelResponse.create({
data: {
inputHash,
scenarioVariantCellId: cellId,
requestedAt: new Date(),
},
});
const response = await provider.getCompletion(prompt.modelInput, onStream);
if (response.type === "success") {
modelResponse = await prisma.modelResponse.update({
where: { id: modelResponse.id },
data: {
output: response.value as Prisma.InputJsonObject,
statusCode: response.statusCode,
receivedAt: new Date(),
promptTokens: response.promptTokens,
completionTokens: response.completionTokens,
cost: response.cost,
},
});
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
retrievalStatus: "COMPLETE",
},
});
await runEvalsForOutput(variant.experimentId, scenario, modelResponse, prompt.modelProvider);
} else {
const shouldRetry = response.autoRetry && numPreviousTries < MAX_AUTO_RETRIES;
const delay = calculateDelay(numPreviousTries);
const retryTime = new Date(Date.now() + delay);
await prisma.modelResponse.update({
where: { id: modelResponse.id },
data: {
statusCode: response.statusCode,
errorMessage: response.message,
receivedAt: new Date(),
retryTime: shouldRetry ? retryTime : null,
},
});
if (shouldRetry) {
await queryModel.enqueue(
{
cellId,
stream,
numPreviousTries: numPreviousTries + 1,
},
retryTime,
);
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
retrievalStatus: "PENDING",
},
});
} else {
await prisma.scenarioVariantCell.update({
where: { id: cellId },
data: {
retrievalStatus: "ERROR",
},
});
}
}
});
export const queueQueryModel = async (cellId: string, stream: boolean) => {
await Promise.all([
prisma.scenarioVariantCell.update({
where: {
id: cellId,
},
data: {
retrievalStatus: "PENDING",
errorMessage: null,
jobQueuedAt: new Date(),
},
}),
queryModel.enqueue({ cellId, stream, numPreviousTries: 0 }),
]);
};

View File

@@ -1,17 +0,0 @@
import { runAllEvals } from "../utils/evaluations";
import defineTask from "./defineTask";
export type RunNewEvalJob = {
experimentId: string;
};
// When a new eval is created, we want to run it on all existing outputs, but return the new eval first
export const runNewEval = defineTask<RunNewEvalJob>("runNewEval", async (task) => {
console.log("RUNNING TASK", task);
const { experimentId } = task;
await runAllEvals(experimentId);
});
export const queueRunNewEval = async (experimentId: string) => {
await runNewEval.enqueue({ experimentId });
};

View File

@@ -1,29 +0,0 @@
import { type TaskList, run } from "graphile-worker";
import "dotenv/config";
import { env } from "~/env.mjs";
import { queryModel } from "./queryModel.task";
import { runNewEval } from "./runNewEval.task";
console.log("Starting worker");
const registeredTasks = [queryModel, runNewEval];
const taskList = registeredTasks.reduce((acc, task) => {
acc[task.task.identifier] = task.task.handler;
return acc;
}, {} as TaskList);
// Run a worker to execute jobs:
const runner = await run({
connectionString: env.DATABASE_URL,
concurrency: 50,
// Install signal handlers for graceful shutdown on SIGINT, SIGTERM, etc
noHandleSignals: false,
pollInterval: 1000,
taskList,
});
console.log("Worker successfully started");
await runner.promise;

View File

@@ -1,99 +0,0 @@
import { type ModelResponse, type Evaluation, Prisma } from "@prisma/client";
import { prisma } from "../db";
import { runOneEval } from "./runOneEval";
import { type Scenario } from "~/components/OutputsTable/types";
import { type SupportedProvider } from "~/modelProviders/types";
const runAndSaveEval = async (
evaluation: Evaluation,
scenario: Scenario,
modelResponse: ModelResponse,
provider: SupportedProvider,
) => {
const result = await runOneEval(evaluation, scenario, modelResponse, provider);
return await prisma.outputEvaluation.upsert({
where: {
modelResponseId_evaluationId: {
modelResponseId: modelResponse.id,
evaluationId: evaluation.id,
},
},
create: {
modelResponseId: modelResponse.id,
evaluationId: evaluation.id,
...result,
},
update: {
...result,
},
});
};
export const runEvalsForOutput = async (
experimentId: string,
scenario: Scenario,
modelResponse: ModelResponse,
provider: SupportedProvider,
) => {
const evaluations = await prisma.evaluation.findMany({
where: { experimentId },
});
await Promise.all(
evaluations.map(
async (evaluation) => await runAndSaveEval(evaluation, scenario, modelResponse, provider),
),
);
};
// Will not run eval-output pairs that already exist in the database
export const runAllEvals = async (experimentId: string) => {
const outputs = await prisma.modelResponse.findMany({
where: {
outdated: false,
output: {
not: Prisma.AnyNull,
},
scenarioVariantCell: {
promptVariant: {
experimentId,
visible: true,
},
testScenario: {
visible: true,
},
},
},
include: {
scenarioVariantCell: {
include: {
testScenario: true,
promptVariant: true,
},
},
outputEvaluations: true,
},
});
const evals = await prisma.evaluation.findMany({
where: { experimentId },
});
await Promise.all(
outputs.map(async (output) => {
const evalsToBeRun = evals.filter(
(evaluation) => !output.outputEvaluations.find((e) => e.evaluationId === evaluation.id),
);
await Promise.all(
evalsToBeRun.map(async (evaluation) => {
await runAndSaveEval(
evaluation,
output.scenarioVariantCell.testScenario,
output,
output.scenarioVariantCell.promptVariant.modelProvider as SupportedProvider,
);
}),
);
}),
);
};

View File

@@ -1,126 +0,0 @@
import { Prisma } from "@prisma/client";
import { prisma } from "../db";
import parseConstructFn from "./parseConstructFn";
import { type JsonObject } from "type-fest";
import hashPrompt from "./hashSortedObject";
import { omit } from "lodash-es";
import { queueQueryModel } from "../tasks/queryModel.task";
export const generateNewCell = async (
variantId: string,
scenarioId: string,
options?: { stream?: boolean },
): Promise<void> => {
const stream = options?.stream ?? false;
const variant = await prisma.promptVariant.findUnique({
where: {
id: variantId,
},
});
const scenario = await prisma.testScenario.findUnique({
where: {
id: scenarioId,
},
});
if (!variant || !scenario) return;
let cell = await prisma.scenarioVariantCell.findUnique({
where: {
promptVariantId_testScenarioId: {
promptVariantId: variantId,
testScenarioId: scenarioId,
},
},
include: {
modelResponses: true,
},
});
if (cell) return;
const parsedConstructFn = await parseConstructFn(
variant.constructFn,
scenario.variableValues as JsonObject,
);
if ("error" in parsedConstructFn) {
await prisma.scenarioVariantCell.create({
data: {
promptVariantId: variantId,
testScenarioId: scenarioId,
retrievalStatus: "ERROR",
},
});
return;
}
const inputHash = hashPrompt(parsedConstructFn);
cell = await prisma.scenarioVariantCell.create({
data: {
promptVariantId: variantId,
testScenarioId: scenarioId,
prompt: parsedConstructFn.modelInput as unknown as Prisma.InputJsonValue,
retrievalStatus: "PENDING",
},
include: {
modelResponses: true,
},
});
const matchingModelResponse = await prisma.modelResponse.findFirst({
where: {
inputHash,
output: {
not: Prisma.AnyNull,
},
},
orderBy: {
receivedAt: "desc",
},
include: {
scenarioVariantCell: true,
},
take: 1,
});
if (matchingModelResponse) {
const newModelResponse = await prisma.modelResponse.create({
data: {
...omit(matchingModelResponse, ["id", "scenarioVariantCell"]),
scenarioVariantCellId: cell.id,
output: matchingModelResponse.output as Prisma.InputJsonValue,
},
});
await prisma.scenarioVariantCell.update({
where: { id: cell.id },
data: {
retrievalStatus: "COMPLETE",
jobStartedAt: matchingModelResponse.scenarioVariantCell?.jobStartedAt,
jobQueuedAt: matchingModelResponse.scenarioVariantCell?.jobQueuedAt,
},
});
// Copy over all eval results as well
await Promise.all(
(
await prisma.outputEvaluation.findMany({
where: { modelResponseId: matchingModelResponse.id },
})
).map(async (evaluation) => {
await prisma.outputEvaluation.create({
data: {
...omit(evaluation, ["id"]),
modelResponseId: newModelResponse.id,
},
});
}),
);
} else {
await queueQueryModel(cell.id, stream);
}
};
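The cache key above is a hash of the parsed prompt, so two cells whose construct functions produce identical prompts share a model response. A hedged illustration, assuming hashSortedObject sorts keys before hashing (as its name suggests):

// Key order does not matter, so these two parsed prompts collide on purpose:
// hashPrompt({ modelProvider: "openai/ChatCompletion", modelInput: { a: 1, b: 2 } })
//   === hashPrompt({ modelProvider: "openai/ChatCompletion", modelInput: { b: 2, a: 1 } })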

View File

@@ -1,12 +0,0 @@
import frontendModelProviders from "~/modelProviders/frontendModelProviders";
import { type ProviderModel } from "~/modelProviders/types";
export const truthyFilter = <T>(x: T | null | undefined): x is T => Boolean(x);
export const lookupModel = (provider: string, model: string) => {
const modelObj = frontendModelProviders[provider as ProviderModel["provider"]]?.models[model];
return modelObj ? { ...modelObj, provider } : null;
};
export const modelLabel = (provider: string, model: string) =>
`${provider}/${lookupModel(provider, model)?.name ?? model}`;
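A quick illustration of these helpers against the (removed) Llama 2 provider shown earlier in this diff:

// lookupModel("replicate/llama2", "13b-chat")?.name === "Llama 2 13B Chat"
// modelLabel("replicate/llama2", "13b-chat") === "replicate/llama2/Llama 2 13B Chat"
// modelLabel("replicate/llama2", "unknown") === "replicate/llama2/unknown" (falls back to the raw id)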

View File

@@ -1,3 +0,0 @@
wwwroot/*.js
node_modules
dist

View File

@@ -1,4 +0,0 @@
wwwroot/*.js
node_modules
typings
dist

View File

@@ -1 +0,0 @@
# empty npmignore to ensure all required files (e.g., in the dist folder) are published by npm

View File

@@ -1,23 +0,0 @@
# OpenAPI Generator Ignore
# Generated by openapi-generator https://github.com/openapitools/openapi-generator
# Use this file to prevent files from being overwritten by the generator.
# The patterns follow closely to .gitignore or .dockerignore.
# As an example, the C# client generator defines ApiClient.cs.
# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
#ApiClient.cs
# You can match any string of characters against a directory, file or extension with a single asterisk (*):
#foo/*/qux
# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
# You can recursively match patterns against a directory, file or extension with a double asterisk (**):
#foo/**/qux
# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
# You can also negate patterns with an exclamation (!).
# For example, you can ignore all files in a docs folder with the file extension .md:
#docs/*.md
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md

View File

@@ -1,9 +0,0 @@
.gitignore
.npmignore
.openapi-generator-ignore
api.ts
base.ts
common.ts
configuration.ts
git_push.sh
index.ts

View File

@@ -1 +0,0 @@
6.6.0

View File

@@ -1,306 +0,0 @@
/* tslint:disable */
/* eslint-disable */
/**
* OpenPipe API
* The public API for reporting API calls to OpenPipe
*
* The version of the OpenAPI document: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import type { Configuration } from './configuration';
import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import globalAxios from 'axios';
// Some imports not used depending on template conditions
// @ts-ignore
import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObject, setBearerAuthToObject, setOAuthToObject, setSearchParams, serializeDataIfNeeded, toPathString, createRequestFunction } from './common';
import type { RequestArgs } from './base';
// @ts-ignore
import { BASE_PATH, COLLECTION_FORMATS, BaseAPI, RequiredError } from './base';
/**
*
* @export
* @interface ExternalApiCapturePromptDefaultResponse
*/
export interface ExternalApiCapturePromptDefaultResponse {
/**
*
* @type {string}
* @memberof ExternalApiCapturePromptDefaultResponse
*/
'message': string;
/**
*
* @type {string}
* @memberof ExternalApiCapturePromptDefaultResponse
*/
'code': string;
/**
*
* @type {Array<ExternalApiCapturePromptDefaultResponseIssuesInner>}
* @memberof ExternalApiCapturePromptDefaultResponse
*/
'issues'?: Array<ExternalApiCapturePromptDefaultResponseIssuesInner>;
}
/**
*
* @export
* @interface ExternalApiCapturePromptDefaultResponseIssuesInner
*/
export interface ExternalApiCapturePromptDefaultResponseIssuesInner {
/**
*
* @type {string}
* @memberof ExternalApiCapturePromptDefaultResponseIssuesInner
*/
'message': string;
}
/**
*
* @export
* @interface ExternalApiCapturePromptRequest
*/
export interface ExternalApiCapturePromptRequest {
/**
* API token for authentication
* @type {string}
* @memberof ExternalApiCapturePromptRequest
*/
'apiKey': string;
/**
* Data flow ID
* @type {string}
* @memberof ExternalApiCapturePromptRequest
*/
'dataFlowId': string;
/**
* Prompt construction function
* @type {string}
* @memberof ExternalApiCapturePromptRequest
*/
'promptFunction': string;
/**
* Scenario variables as JSON
* @type {any}
* @memberof ExternalApiCapturePromptRequest
*/
'scenarioVariables'?: any;
/**
* Prompt object as JSON
* @type {any}
* @memberof ExternalApiCapturePromptRequest
*/
'prompt'?: any;
/**
* Model name
* @type {string}
* @memberof ExternalApiCapturePromptRequest
*/
'model': string;
/**
* Model provider
* @type {string}
* @memberof ExternalApiCapturePromptRequest
*/
'modelProvider': string;
}
/**
*
* @export
* @interface ExternalApiCaptureResponseRequest
*/
export interface ExternalApiCaptureResponseRequest {
/**
* API token for authentication
* @type {string}
* @memberof ExternalApiCaptureResponseRequest
*/
'apiKey': string;
/**
* Logged call ID
* @type {string}
* @memberof ExternalApiCaptureResponseRequest
*/
'loggedCallId': string;
/**
* JSON-encoded response payload
* @type {any}
* @memberof ExternalApiCaptureResponseRequest
*/
'responsePayload'?: any;
}
/**
* DefaultApi - axios parameter creator
* @export
*/
export const DefaultApiAxiosParamCreator = function (configuration?: Configuration) {
return {
/**
* Capture a prompt call with its variables
* @param {ExternalApiCapturePromptRequest} externalApiCapturePromptRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
externalApiCapturePrompt: async (externalApiCapturePromptRequest: ExternalApiCapturePromptRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'externalApiCapturePromptRequest' is not null or undefined
assertParamExists('externalApiCapturePrompt', 'externalApiCapturePromptRequest', externalApiCapturePromptRequest)
const localVarPath = `/v1/capture-prompt`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
localVarHeaderParameter['Content-Type'] = 'application/json';
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
localVarRequestOptions.data = serializeDataIfNeeded(externalApiCapturePromptRequest, localVarRequestOptions, configuration)
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
/**
* Capture a response to a prompt
* @param {ExternalApiCaptureResponseRequest} externalApiCaptureResponseRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
externalApiCaptureResponse: async (externalApiCaptureResponseRequest: ExternalApiCaptureResponseRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'externalApiCaptureResponseRequest' is not null or undefined
assertParamExists('externalApiCaptureResponse', 'externalApiCaptureResponseRequest', externalApiCaptureResponseRequest)
const localVarPath = `/v1/capture-response`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
localVarHeaderParameter['Content-Type'] = 'application/json';
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
localVarRequestOptions.data = serializeDataIfNeeded(externalApiCaptureResponseRequest, localVarRequestOptions, configuration)
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
}
};
/**
* DefaultApi - functional programming interface
* @export
*/
export const DefaultApiFp = function(configuration?: Configuration) {
const localVarAxiosParamCreator = DefaultApiAxiosParamCreator(configuration)
return {
/**
* Capture a prompt call with its variables
* @param {ExternalApiCapturePromptRequest} externalApiCapturePromptRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async externalApiCapturePrompt(externalApiCapturePromptRequest: ExternalApiCapturePromptRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<string>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.externalApiCapturePrompt(externalApiCapturePromptRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/**
* Capture a response to a prompt
* @param {ExternalApiCaptureResponseRequest} externalApiCaptureResponseRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async externalApiCaptureResponse(externalApiCaptureResponseRequest: ExternalApiCaptureResponseRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<any>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.externalApiCaptureResponse(externalApiCaptureResponseRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
}
};
/**
* DefaultApi - factory interface
* @export
*/
export const DefaultApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
const localVarFp = DefaultApiFp(configuration)
return {
/**
* Capture a prompt call with its variables
* @param {ExternalApiCapturePromptRequest} externalApiCapturePromptRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
externalApiCapturePrompt(externalApiCapturePromptRequest: ExternalApiCapturePromptRequest, options?: any): AxiosPromise<string> {
return localVarFp.externalApiCapturePrompt(externalApiCapturePromptRequest, options).then((request) => request(axios, basePath));
},
/**
* Capture a response to a prompt
* @param {ExternalApiCaptureResponseRequest} externalApiCaptureResponseRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
externalApiCaptureResponse(externalApiCaptureResponseRequest: ExternalApiCaptureResponseRequest, options?: any): AxiosPromise<any> {
return localVarFp.externalApiCaptureResponse(externalApiCaptureResponseRequest, options).then((request) => request(axios, basePath));
},
};
};
/**
* DefaultApi - object-oriented interface
* @export
* @class DefaultApi
* @extends {BaseAPI}
*/
export class DefaultApi extends BaseAPI {
/**
* Capture a prompt call with its variables
* @param {ExternalApiCapturePromptRequest} externalApiCapturePromptRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof DefaultApi
*/
public externalApiCapturePrompt(externalApiCapturePromptRequest: ExternalApiCapturePromptRequest, options?: AxiosRequestConfig) {
return DefaultApiFp(this.configuration).externalApiCapturePrompt(externalApiCapturePromptRequest, options).then((request) => request(this.axios, this.basePath));
}
/**
* Capture a response to a prompt
* @param {ExternalApiCaptureResponseRequest} externalApiCaptureResponseRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof DefaultApi
*/
public externalApiCaptureResponse(externalApiCaptureResponseRequest: ExternalApiCaptureResponseRequest, options?: AxiosRequestConfig) {
return DefaultApiFp(this.configuration).externalApiCaptureResponse(externalApiCaptureResponseRequest, options).then((request) => request(this.axios, this.basePath));
}
}

View File

@@ -1,72 +0,0 @@
/* tslint:disable */
/* eslint-disable */
/**
* OpenPipe API
* The public API for reporting API calls to OpenPipe
*
* The version of the OpenAPI document: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import type { Configuration } from './configuration';
// Some imports not used depending on template conditions
// @ts-ignore
import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import globalAxios from 'axios';
export const BASE_PATH = "https://app.openpipe.ai/api".replace(/\/+$/, "");
/**
*
* @export
*/
export const COLLECTION_FORMATS = {
csv: ",",
ssv: " ",
tsv: "\t",
pipes: "|",
};
/**
*
* @export
* @interface RequestArgs
*/
export interface RequestArgs {
url: string;
options: AxiosRequestConfig;
}
/**
*
* @export
* @class BaseAPI
*/
export class BaseAPI {
protected configuration: Configuration | undefined;
constructor(configuration?: Configuration, protected basePath: string = BASE_PATH, protected axios: AxiosInstance = globalAxios) {
if (configuration) {
this.configuration = configuration;
this.basePath = configuration.basePath || this.basePath;
}
}
};
/**
*
* @export
* @class RequiredError
* @extends {Error}
*/
export class RequiredError extends Error {
constructor(public field: string, msg?: string) {
super(msg);
this.name = "RequiredError"
}
}

View File

@@ -1,150 +0,0 @@
/* tslint:disable */
/* eslint-disable */
/**
* OpenPipe API
* The public API for reporting API calls to OpenPipe
*
* The version of the OpenAPI document: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import type { Configuration } from "./configuration";
import type { RequestArgs } from "./base";
import type { AxiosInstance, AxiosResponse } from 'axios';
import { RequiredError } from "./base";
/**
*
* @export
*/
export const DUMMY_BASE_URL = 'https://example.com'
/**
*
* @throws {RequiredError}
* @export
*/
export const assertParamExists = function (functionName: string, paramName: string, paramValue: unknown) {
if (paramValue === null || paramValue === undefined) {
throw new RequiredError(paramName, `Required parameter ${paramName} was null or undefined when calling ${functionName}.`);
}
}
/**
*
* @export
*/
export const setApiKeyToObject = async function (object: any, keyParamName: string, configuration?: Configuration) {
if (configuration && configuration.apiKey) {
const localVarApiKeyValue = typeof configuration.apiKey === 'function'
? await configuration.apiKey(keyParamName)
: await configuration.apiKey;
object[keyParamName] = localVarApiKeyValue;
}
}
/**
*
* @export
*/
export const setBasicAuthToObject = function (object: any, configuration?: Configuration) {
if (configuration && (configuration.username || configuration.password)) {
object["auth"] = { username: configuration.username, password: configuration.password };
}
}
/**
*
* @export
*/
export const setBearerAuthToObject = async function (object: any, configuration?: Configuration) {
if (configuration && configuration.accessToken) {
const accessToken = typeof configuration.accessToken === 'function'
? await configuration.accessToken()
: await configuration.accessToken;
object["Authorization"] = "Bearer " + accessToken;
}
}
/**
*
* @export
*/
export const setOAuthToObject = async function (object: any, name: string, scopes: string[], configuration?: Configuration) {
if (configuration && configuration.accessToken) {
const localVarAccessTokenValue = typeof configuration.accessToken === 'function'
? await configuration.accessToken(name, scopes)
: await configuration.accessToken;
object["Authorization"] = "Bearer " + localVarAccessTokenValue;
}
}
function setFlattenedQueryParams(urlSearchParams: URLSearchParams, parameter: any, key: string = ""): void {
if (parameter == null) return;
if (typeof parameter === "object") {
if (Array.isArray(parameter)) {
(parameter as any[]).forEach(item => setFlattenedQueryParams(urlSearchParams, item, key));
}
else {
Object.keys(parameter).forEach(currentKey =>
setFlattenedQueryParams(urlSearchParams, parameter[currentKey], `${key}${key !== '' ? '.' : ''}${currentKey}`)
);
}
}
else {
if (urlSearchParams.has(key)) {
urlSearchParams.append(key, parameter);
}
else {
urlSearchParams.set(key, parameter);
}
}
}
/**
*
* @export
*/
export const setSearchParams = function (url: URL, ...objects: any[]) {
const searchParams = new URLSearchParams(url.search);
setFlattenedQueryParams(searchParams, objects);
url.search = searchParams.toString();
}
/**
*
* @export
*/
export const serializeDataIfNeeded = function (value: any, requestOptions: any, configuration?: Configuration) {
const nonString = typeof value !== 'string';
const needsSerialization = nonString && configuration && configuration.isJsonMime
? configuration.isJsonMime(requestOptions.headers['Content-Type'])
: nonString;
return needsSerialization
? JSON.stringify(value !== undefined ? value : {})
: (value || "");
}
/**
*
* @export
*/
export const toPathString = function (url: URL) {
return url.pathname + url.search + url.hash
}
/**
*
* @export
*/
export const createRequestFunction = function (axiosArgs: RequestArgs, globalAxios: AxiosInstance, BASE_PATH: string, configuration?: Configuration) {
return <T = unknown, R = AxiosResponse<T>>(axios: AxiosInstance = globalAxios, basePath: string = BASE_PATH) => {
const axiosRequestArgs = {...axiosArgs.options, url: (configuration?.basePath || basePath) + axiosArgs.url};
return axios.request<T, R>(axiosRequestArgs);
};
}

View File

@@ -1,101 +0,0 @@
/* tslint:disable */
/* eslint-disable */
/**
* OpenPipe API
* The public API for reporting API calls to OpenPipe
*
* The version of the OpenAPI document: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
export interface ConfigurationParameters {
apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>);
username?: string;
password?: string;
accessToken?: string | Promise<string> | ((name?: string, scopes?: string[]) => string) | ((name?: string, scopes?: string[]) => Promise<string>);
basePath?: string;
baseOptions?: any;
formDataCtor?: new () => any;
}
export class Configuration {
/**
* parameter for apiKey security
* @param name security name
* @memberof Configuration
*/
apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>);
/**
* parameter for basic security
*
* @type {string}
* @memberof Configuration
*/
username?: string;
/**
* parameter for basic security
*
* @type {string}
* @memberof Configuration
*/
password?: string;
/**
* parameter for oauth2 security
* @param name security name
* @param scopes oauth2 scope
* @memberof Configuration
*/
accessToken?: string | Promise<string> | ((name?: string, scopes?: string[]) => string) | ((name?: string, scopes?: string[]) => Promise<string>);
/**
* override base path
*
* @type {string}
* @memberof Configuration
*/
basePath?: string;
/**
* base options for axios calls
*
* @type {any}
* @memberof Configuration
*/
baseOptions?: any;
/**
* The FormData constructor that will be used to create multipart form data
* requests. You can inject this here so that execution environments that
* do not support the FormData class can still run the generated client.
*
* @type {new () => FormData}
*/
formDataCtor?: new () => any;
constructor(param: ConfigurationParameters = {}) {
this.apiKey = param.apiKey;
this.username = param.username;
this.password = param.password;
this.accessToken = param.accessToken;
this.basePath = param.basePath;
this.baseOptions = param.baseOptions;
this.formDataCtor = param.formDataCtor;
}
/**
* Check if the given MIME is a JSON MIME.
* JSON MIME examples:
* application/json
* application/json; charset=UTF8
* APPLICATION/JSON
* application/vnd.company+json
* @param mime - MIME (Multipurpose Internet Mail Extensions)
* @return True if the given MIME is JSON, false otherwise.
*/
public isJsonMime(mime: string): boolean {
const jsonMime: RegExp = new RegExp('^(application\/json|[^;/ \t]+\/[^;/ \t]+[+]json)[ \t]*(;.*)?$', 'i');
return mime !== null && (jsonMime.test(mime) || mime.toLowerCase() === 'application/json-patch+json');
}
}

View File

@@ -1,57 +0,0 @@
#!/bin/sh
# ref: https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/
#
# Usage example: /bin/sh ./git_push.sh wing328 openapi-petstore-perl "minor update" "gitlab.com"
git_user_id=$1
git_repo_id=$2
release_note=$3
git_host=$4
if [ "$git_host" = "" ]; then
git_host="github.com"
echo "[INFO] No command line input provided. Set \$git_host to $git_host"
fi
if [ "$git_user_id" = "" ]; then
git_user_id="GIT_USER_ID"
echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id"
fi
if [ "$git_repo_id" = "" ]; then
git_repo_id="GIT_REPO_ID"
echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id"
fi
if [ "$release_note" = "" ]; then
release_note="Minor update"
echo "[INFO] No command line input provided. Set \$release_note to $release_note"
fi
# Initialize the local directory as a Git repository
git init
# Adds the files in the local repository and stages them for commit.
git add .
# Commits the tracked changes and prepares them to be pushed to a remote repository.
git commit -m "$release_note"
# Sets the new remote
git_remote=$(git remote)
if [ "$git_remote" = "" ]; then # git remote not defined
if [ "$GIT_TOKEN" = "" ]; then
echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment."
git remote add origin https://${git_host}/${git_user_id}/${git_repo_id}.git
else
git remote add origin https://${git_user_id}:"${GIT_TOKEN}"@${git_host}/${git_user_id}/${git_repo_id}.git
fi
fi
git pull origin master
# Pushes (Forces) the changes in the local repository up to the remote repository
echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git"
git push origin master 2>&1 | grep -v 'To https'

View File

@@ -1,18 +0,0 @@
/* tslint:disable */
/* eslint-disable */
/**
* OpenPipe API
* The public API for reporting API calls to OpenPipe
*
* The version of the OpenAPI document: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
export * from "./api";
export * from "./configuration";

View File

@@ -1,300 +0,0 @@
/**
* This type map defines the input types for each model provider.
*/
export interface PromptTypes {
"openai/ChatCompletion"?: {
/**
* ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
*/
model:
| "gpt-4"
| "gpt-4-0613"
| "gpt-4-32k"
| "gpt-4-32k-0613"
| "gpt-3.5-turbo"
| "gpt-3.5-turbo-16k"
| "gpt-3.5-turbo-0613"
| "gpt-3.5-turbo-16k-0613";
/**
* A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
*
* @minItems 1
*/
messages: [
{
/**
* The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
*/
role: "system" | "user" | "assistant" | "function";
/**
* The contents of the message. `content` is required for all messages except assistant messages with function calls.
*/
content?: string;
/**
* The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
*/
name?: string;
/**
* The name and arguments of a function that should be called, as generated by the model.
*/
function_call?: {
/**
* The name of the function to call.
*/
name?: string;
/**
* The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
*/
arguments?: string;
};
},
...{
/**
* The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
*/
role: "system" | "user" | "assistant" | "function";
/**
* The contents of the message. `content` is required for all messages except assistant messages with function calls.
*/
content?: string;
/**
* The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
*/
name?: string;
/**
* The name and arguments of a function that should be called, as generated by the model.
*/
function_call?: {
/**
* The name of the function to call.
*/
name?: string;
/**
* The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
*/
arguments?: string;
};
}[]
];
/**
* A list of functions the model may generate JSON inputs for.
*
* @minItems 1
*/
functions?: [
{
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
*/
name: string;
/**
* The description of what the function does.
*/
description?: string;
/**
* The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
*/
parameters?: {
[k: string]: unknown;
};
},
...{
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
*/
name: string;
/**
* The description of what the function does.
*/
description?: string;
/**
* The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
*/
parameters?: {
[k: string]: unknown;
};
}[]
];
/**
* Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present.
*/
function_call?:
| ("none" | "auto")
| {
/**
* The name of the function to call.
*/
name: string;
};
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
*
* We generally recommend altering this or `top_p` but not both.
*
*/
temperature?: number;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
*
* We generally recommend altering this or `temperature` but not both.
*
*/
top_p?: number;
/**
* How many chat completion choices to generate for each input message.
*/
n?: number;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
*
*/
stream?: boolean;
/**
* Up to 4 sequences where the API will stop generating further tokens.
*
*/
stop?: string | [string] | [string, string] | [string, string, string] | [string, string, string, string];
/**
* The maximum number of [tokens](/tokenizer) to generate in the chat completion.
*
* The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
*
*/
max_tokens?: number;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
*
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*
*/
presence_penalty?: number;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
*
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*
*/
frequency_penalty?: number;
/**
* Modify the likelihood of specified tokens appearing in the completion.
*
* Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
*
*/
logit_bias?: {};
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
*
*/
user?: string;
};
"replicate/llama2"?: {
model: "7b-chat" | "13b-chat" | "70b-chat";
/**
* System prompt to send to Llama v2. This is prepended to the prompt and helps guide system behavior.
*/
system_prompt?: string;
/**
* Prompt to send to Llama v2.
*/
prompt: string;
/**
* Maximum number of tokens to generate. A word is generally 2-3 tokens (minimum: 1)
*/
max_new_tokens?: number;
/**
* Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value. (minimum: 0.01; maximum: 5)
*/
temperature?: number;
/**
* When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens (minimum: 0.01; maximum: 1)
*/
top_p?: number;
/**
* Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it. (minimum: 0.01; maximum: 5)
*/
repetition_penalty?: number;
/**
* provide debugging output in logs
*/
debug?: boolean;
};
anthropic?: {
/**
* The model that will complete your prompt.
* As we improve Claude, we develop new versions of it that you can query.
* This parameter controls which version of Claude answers your request.
* Right now we are offering two model families: Claude, and Claude Instant.
* You can use them by setting model to "claude-2" or "claude-instant-1", respectively.
* See models for additional details.
*
*/
model: "claude-2" | "claude-2.0" | "claude-instant-1" | "claude-instant-1.1";
/**
* The prompt that you want Claude to complete.
*
* For proper response generation you will need to format your prompt as follows:
* \n\nHuman: ${userQuestion}\n\nAssistant:
* See our comments on prompts for more context.
*
*/
prompt: string | string[] | [number, ...number[]] | [[number, ...number[]], ...[number, ...number[]][]];
/**
* The maximum number of tokens to generate before stopping.
*
* Note that our models may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate.
*
*/
max_tokens_to_sample: number;
/**
* Amount of randomness injected into the response.
*
* Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks.
*
*/
temperature?: number;
/**
* Use nucleus sampling.
*
* In nucleus sampling, we compute the cumulative distribution over all the options
* for each subsequent token in decreasing probability order and cut it off once
* it reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both.
*
*/
top_p?: number;
/**
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses. Learn more technical details here.
*
*/
top_k?: number;
/**
* Whether to incrementally stream the response using server-sent events.
* See this guide to SSE events for details.
*
*/
stream?: boolean;
/**
* Sequences that will cause the model to stop generating completion text.
* Our models stop on "\n\nHuman:", and may include additional built-in stop sequences in the future. By providing the stop_sequences parameter, you may include additional strings that will cause the model to stop generating.
*
*/
stop_sequences?: string | [string] | [string, string] | [string, string, string] | [string, string, string, string];
/**
* An object describing metadata about the request.
*
*/
metadata?: {
/**
* An external identifier for the user who is associated with the request.
*
* This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse.
* Do not include any identifying information such as name, email address, or phone number.
*
*/
user_id?: string;
};
};
}
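
For reference, here is a minimal value satisfying the `openai/ChatCompletion` entry of this map. This is a sketch based only on the interface above; the import path is the one used by the client file below, and the values are illustrative.

import { PromptTypes } from "./codegen/promptTypes";

// Minimal chat-completion input conforming to the type map above.
const chatPrompt: PromptTypes["openai/ChatCompletion"] = {
  model: "gpt-3.5-turbo",
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "What is the capital of Spain?" },
  ],
  temperature: 0.2, // lower temperature for a more deterministic answer
  max_tokens: 64,
};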

View File

@@ -1,88 +0,0 @@
import * as opClient from "./codegen";
import { PromptTypes } from "./codegen/promptTypes";
import { version } from "./package.json";
type OPConfigOptions = {
apiKey?: string;
basePath?: string;
};
export class Configuration {
public opConfig?: opClient.Configuration;
// TODO: use OpenAPI apiKey or something similar
public apiKey = "";
constructor(config: { opOptions?: OPConfigOptions }) {
if (config.opOptions) {
this.opConfig = new opClient.Configuration(config.opOptions);
this.apiKey = config.opOptions.apiKey ?? "";
}
}
}
export class OpenPipeApi {
public opApi?: opClient.DefaultApi;
private apiKey = "";
constructor(config: Configuration) {
if (config.opConfig) {
this.opApi = new opClient.DefaultApi(config.opConfig);
this.apiKey = config.apiKey;
}
}
public async capturePrompt<T extends keyof PromptTypes>(
dataFlowId: string,
modelProvider: T,
promptFunction: () => PromptTypes[T],
scenarioVariables: Record<string, unknown>
) {
const prompt = promptFunction() as PromptTypes[T];
if (!prompt) {
console.error("Prompt function returned null", promptFunction.toString());
}
const promptFunctionString = promptFunction.toString();
const startTime = Date.now();
let resp;
try {
resp = await this.opApi?.externalApiCapturePrompt({
apiKey: this.apiKey,
dataFlowId,
promptFunction: promptFunctionString,
scenarioVariables,
model: "",
modelProvider,
prompt,
});
} catch (err) {
console.error("Error reporting to OpenPipe", err);
}
return {
id: resp?.data?.loggedCallId,
prompt: prompt!,
};
}
public async captureResponse(
loggedCallId: string | undefined,
responsePayload: any
): Promise<void> {
if (!loggedCallId) {
console.error("No call log ID provided to captureResponse");
return;
}
try {
await this.opApi?.externalApiCaptureResponse({
apiKey: this.apiKey,
loggedCallId,
responsePayload,
});
} catch (err) {
console.error("Error reporting to OpenPipe", err);
}
}
}
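
The intended call pattern is capture, call the provider, then report the payload back. Below is a minimal sketch, assuming the exports above, the relative import path used by the smoketest further down, and a placeholder data flow ID; the response payload shown is likewise a placeholder.

import { Configuration, OpenPipeApi } from "..";

const client = new OpenPipeApi(
  new Configuration({
    opOptions: { apiKey: "my-api-key", basePath: "https://app.openpipe.ai/api" },
  }),
);

async function demo() {
  // 1. Capture the prompt before calling the model provider.
  const { id, prompt } = await client.capturePrompt(
    "my-data-flow-id", // placeholder
    "openai/ChatCompletion",
    () => ({
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Count to 3 in French." }],
    }),
    {}, // scenario variables referenced by the prompt function, if any
  );

  // 2. Call the provider with `prompt` here, then report the raw payload back.
  await client.captureResponse(id, {
    choices: [{ message: { role: "assistant", content: "un, deux, trois" } }],
  });
}

demo().catch((err) => console.error(err));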

View File

@@ -1,22 +0,0 @@
{
"name": "openai-js",
"version": "0.1.0",
"description": "",
"main": "index.js",
"scripts": {
"smoketest": "tsx ./tests/smoketest.ts"
},
"keywords": [],
"author": "",
"license": "ISC",
"dependencies": {
"axios": "^1.4.0",
"openai": "^3.3.0"
},
"devDependencies": {
"@types/node": "^20.4.5",
"dotenv": "^16.3.1",
"tsx": "^3.12.7",
"typescript": "^5.1.6"
}
}

View File

@@ -1,416 +0,0 @@
lockfileVersion: '6.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
axios:
specifier: ^1.4.0
version: 1.4.0
openai:
specifier: ^3.3.0
version: 3.3.0
devDependencies:
'@types/node':
specifier: ^20.4.5
version: 20.4.5
dotenv:
specifier: ^16.3.1
version: 16.3.1
tsx:
specifier: ^3.12.7
version: 3.12.7
typescript:
specifier: ^5.1.6
version: 5.1.6
packages:
/@esbuild-kit/cjs-loader@2.4.2:
resolution: {integrity: sha512-BDXFbYOJzT/NBEtp71cvsrGPwGAMGRB/349rwKuoxNSiKjPraNNnlK6MIIabViCjqZugu6j+xeMDlEkWdHHJSg==}
dependencies:
'@esbuild-kit/core-utils': 3.1.0
get-tsconfig: 4.6.2
dev: true
/@esbuild-kit/core-utils@3.1.0:
resolution: {integrity: sha512-Uuk8RpCg/7fdHSceR1M6XbSZFSuMrxcePFuGgyvsBn+u339dk5OeL4jv2EojwTN2st/unJGsVm4qHWjWNmJ/tw==}
dependencies:
esbuild: 0.17.19
source-map-support: 0.5.21
dev: true
/@esbuild-kit/esm-loader@2.5.5:
resolution: {integrity: sha512-Qwfvj/qoPbClxCRNuac1Du01r9gvNOT+pMYtJDapfB1eoGN1YlJ1BixLyL9WVENRx5RXgNLdfYdx/CuswlGhMw==}
dependencies:
'@esbuild-kit/core-utils': 3.1.0
get-tsconfig: 4.6.2
dev: true
/@esbuild/android-arm64@0.17.19:
resolution: {integrity: sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==}
engines: {node: '>=12'}
cpu: [arm64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm@0.17.19:
resolution: {integrity: sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==}
engines: {node: '>=12'}
cpu: [arm]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-x64@0.17.19:
resolution: {integrity: sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==}
engines: {node: '>=12'}
cpu: [x64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-arm64@0.17.19:
resolution: {integrity: sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==}
engines: {node: '>=12'}
cpu: [arm64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-x64@0.17.19:
resolution: {integrity: sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==}
engines: {node: '>=12'}
cpu: [x64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-arm64@0.17.19:
resolution: {integrity: sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==}
engines: {node: '>=12'}
cpu: [arm64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-x64@0.17.19:
resolution: {integrity: sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm64@0.17.19:
resolution: {integrity: sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==}
engines: {node: '>=12'}
cpu: [arm64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm@0.17.19:
resolution: {integrity: sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==}
engines: {node: '>=12'}
cpu: [arm]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ia32@0.17.19:
resolution: {integrity: sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==}
engines: {node: '>=12'}
cpu: [ia32]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-loong64@0.17.19:
resolution: {integrity: sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==}
engines: {node: '>=12'}
cpu: [loong64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-mips64el@0.17.19:
resolution: {integrity: sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==}
engines: {node: '>=12'}
cpu: [mips64el]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ppc64@0.17.19:
resolution: {integrity: sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-riscv64@0.17.19:
resolution: {integrity: sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==}
engines: {node: '>=12'}
cpu: [riscv64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-s390x@0.17.19:
resolution: {integrity: sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==}
engines: {node: '>=12'}
cpu: [s390x]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-x64@0.17.19:
resolution: {integrity: sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==}
engines: {node: '>=12'}
cpu: [x64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/netbsd-x64@0.17.19:
resolution: {integrity: sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==}
engines: {node: '>=12'}
cpu: [x64]
os: [netbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/openbsd-x64@0.17.19:
resolution: {integrity: sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==}
engines: {node: '>=12'}
cpu: [x64]
os: [openbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/sunos-x64@0.17.19:
resolution: {integrity: sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==}
engines: {node: '>=12'}
cpu: [x64]
os: [sunos]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-arm64@0.17.19:
resolution: {integrity: sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==}
engines: {node: '>=12'}
cpu: [arm64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-ia32@0.17.19:
resolution: {integrity: sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==}
engines: {node: '>=12'}
cpu: [ia32]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-x64@0.17.19:
resolution: {integrity: sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==}
engines: {node: '>=12'}
cpu: [x64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@types/node@20.4.5:
resolution: {integrity: sha512-rt40Nk13II9JwQBdeYqmbn2Q6IVTA5uPhvSO+JVqdXw/6/4glI6oR9ezty/A9Hg5u7JH4OmYmuQ+XvjKm0Datg==}
dev: true
/asynckit@0.4.0:
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
dev: false
/axios@0.26.1:
resolution: {integrity: sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==}
dependencies:
follow-redirects: 1.15.2
transitivePeerDependencies:
- debug
dev: false
/axios@1.4.0:
resolution: {integrity: sha512-S4XCWMEmzvo64T9GfvQDOXgYRDJ/wsSZc7Jvdgx5u1sd0JwsuPLqb3SYmusag+edF6ziyMensPVqLTSc1PiSEA==}
dependencies:
follow-redirects: 1.15.2
form-data: 4.0.0
proxy-from-env: 1.1.0
transitivePeerDependencies:
- debug
dev: false
/buffer-from@1.1.2:
resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
dev: true
/combined-stream@1.0.8:
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
engines: {node: '>= 0.8'}
dependencies:
delayed-stream: 1.0.0
dev: false
/delayed-stream@1.0.0:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'}
dev: false
/dotenv@16.3.1:
resolution: {integrity: sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ==}
engines: {node: '>=12'}
dev: true
/esbuild@0.17.19:
resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==}
engines: {node: '>=12'}
hasBin: true
requiresBuild: true
optionalDependencies:
'@esbuild/android-arm': 0.17.19
'@esbuild/android-arm64': 0.17.19
'@esbuild/android-x64': 0.17.19
'@esbuild/darwin-arm64': 0.17.19
'@esbuild/darwin-x64': 0.17.19
'@esbuild/freebsd-arm64': 0.17.19
'@esbuild/freebsd-x64': 0.17.19
'@esbuild/linux-arm': 0.17.19
'@esbuild/linux-arm64': 0.17.19
'@esbuild/linux-ia32': 0.17.19
'@esbuild/linux-loong64': 0.17.19
'@esbuild/linux-mips64el': 0.17.19
'@esbuild/linux-ppc64': 0.17.19
'@esbuild/linux-riscv64': 0.17.19
'@esbuild/linux-s390x': 0.17.19
'@esbuild/linux-x64': 0.17.19
'@esbuild/netbsd-x64': 0.17.19
'@esbuild/openbsd-x64': 0.17.19
'@esbuild/sunos-x64': 0.17.19
'@esbuild/win32-arm64': 0.17.19
'@esbuild/win32-ia32': 0.17.19
'@esbuild/win32-x64': 0.17.19
dev: true
/follow-redirects@1.15.2:
resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==}
engines: {node: '>=4.0'}
peerDependencies:
debug: '*'
peerDependenciesMeta:
debug:
optional: true
dev: false
/form-data@4.0.0:
resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==}
engines: {node: '>= 6'}
dependencies:
asynckit: 0.4.0
combined-stream: 1.0.8
mime-types: 2.1.35
dev: false
/fsevents@2.3.2:
resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
requiresBuild: true
dev: true
optional: true
/get-tsconfig@4.6.2:
resolution: {integrity: sha512-E5XrT4CbbXcXWy+1jChlZmrmCwd5KGx502kDCXJJ7y898TtWW9FwoG5HfOLVRKmlmDGkWN2HM9Ho+/Y8F0sJDg==}
dependencies:
resolve-pkg-maps: 1.0.0
dev: true
/mime-db@1.52.0:
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
engines: {node: '>= 0.6'}
dev: false
/mime-types@2.1.35:
resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
engines: {node: '>= 0.6'}
dependencies:
mime-db: 1.52.0
dev: false
/openai@3.3.0:
resolution: {integrity: sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==}
dependencies:
axios: 0.26.1
form-data: 4.0.0
transitivePeerDependencies:
- debug
dev: false
/proxy-from-env@1.1.0:
resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==}
dev: false
/resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
dev: true
/source-map-support@0.5.21:
resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==}
dependencies:
buffer-from: 1.1.2
source-map: 0.6.1
dev: true
/source-map@0.6.1:
resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==}
engines: {node: '>=0.10.0'}
dev: true
/tsx@3.12.7:
resolution: {integrity: sha512-C2Ip+jPmqKd1GWVQDvz/Eyc6QJbGfE7NrR3fx5BpEHMZsEHoIxHL1j+lKdGobr8ovEyqeNkPLSKp6SCSOt7gmw==}
hasBin: true
dependencies:
'@esbuild-kit/cjs-loader': 2.4.2
'@esbuild-kit/core-utils': 3.1.0
'@esbuild-kit/esm-loader': 2.5.5
optionalDependencies:
fsevents: 2.3.2
dev: true
/typescript@5.1.6:
resolution: {integrity: sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==}
engines: {node: '>=14.17'}
hasBin: true
dev: true

View File

@@ -1,43 +0,0 @@
import "dotenv/config";
import { Configuration, OpenAIApi } from "openai";
import { Configuration as OPConfiguration, OpenPipeApi } from "..";
export const openAIConfig = new Configuration({ apiKey: process.env.OPENAI_API_KEY });
const openai = new OpenAIApi(openAIConfig);
const config = new OPConfiguration({
opOptions: {
apiKey: "mock-key",
basePath: "http://localhost:3000/api",
},
});
const client = new OpenPipeApi(config);
async function main() {
const prompt = await client.capturePrompt(
"123",
"openai/ChatCompletion",
() => ({
model: "gpt-3.5-turbo",
messages: [
{
role: "system",
content: "count to 3 in french",
},
],
}),
{}
);
const response = await openai.createChatCompletion(prompt.prompt);
console.log("got response", response.data.choices[0].message);
}
main().catch((err) => {
console.error("exited with error", err);
});

View File

@@ -1,179 +0,0 @@
{
"openapi": "3.0.3",
"info": {
"title": "OpenPipe API",
"description": "The public API for reporting API calls to OpenPipe",
"version": "0.1.0"
},
"servers": [
{
"url": "https://app.openpipe.ai/api"
}
],
"paths": {
"/v1/capture-prompt": {
"post": {
"operationId": "externalApi-capturePrompt",
"description": "Capture a prompt with its variables call",
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"apiKey": {
"type": "string",
"description": "API token for authentication"
},
"dataFlowId": {
"type": "string",
"description": "Data flow ID"
},
"promptFunction": {
"type": "string",
"description": "Prompt construction function"
},
"scenarioVariables": {
"description": "Scenario variables as JSON"
},
"prompt": {
"description": "Prompt object as JSON"
},
"model": {
"type": "string",
"description": "Model name"
},
"modelProvider": {
"type": "string",
"description": "Model provider"
}
},
"required": [
"apiKey",
"dataFlowId",
"promptFunction",
"model",
"modelProvider"
],
"additionalProperties": false
}
}
}
},
"parameters": [],
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"type": "string"
}
}
}
},
"default": {
"$ref": "#/components/responses/error"
}
}
}
},
"/v1/capture-response": {
"post": {
"operationId": "externalApi-captureResponse",
"description": "Capture a response to a prompt",
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"apiKey": {
"type": "string",
"description": "API token for authentication"
},
"loggedCallId": {
"type": "string",
"description": "Logged call ID"
},
"responsePayload": {
"description": "JSON-encoded response payload"
}
},
"required": [
"apiKey",
"loggedCallId"
],
"additionalProperties": false
}
}
}
},
"parameters": [],
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {}
}
}
},
"default": {
"$ref": "#/components/responses/error"
}
}
}
}
},
"components": {
"securitySchemes": {
"Authorization": {
"type": "http",
"scheme": "bearer"
}
},
"responses": {
"error": {
"description": "Error response",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"message": {
"type": "string"
},
"code": {
"type": "string"
},
"issues": {
"type": "array",
"items": {
"type": "object",
"properties": {
"message": {
"type": "string"
}
},
"required": [
"message"
],
"additionalProperties": false
}
}
},
"required": [
"message",
"code"
],
"additionalProperties": false
}
}
}
}
}
}
}
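
Either endpoint can also be exercised directly over HTTP. Here is a minimal sketch against the capture-prompt route, assuming a Node 18+ `fetch` in a top-level-await context; the credentials and payload are placeholders, and per the schema above a successful response body is the logged call ID as a JSON string.

// POST the required fields from the schema above; all values are placeholders.
const res = await fetch("https://app.openpipe.ai/api/v1/capture-prompt", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    apiKey: "my-api-key",
    dataFlowId: "my-data-flow-id",
    promptFunction: "() => ({ model: 'gpt-3.5-turbo', messages: [] })",
    scenarioVariables: {},
    prompt: { model: "gpt-3.5-turbo", messages: [] },
    model: "gpt-3.5-turbo",
    modelProvider: "openai/ChatCompletion",
  }),
});
const loggedCallId: string = await res.json();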

View File

@@ -12,16 +12,15 @@
"dev:next": "next dev", "dev:next": "next dev",
"dev:wss": "pnpm tsx --watch src/wss-server.ts", "dev:wss": "pnpm tsx --watch src/wss-server.ts",
"dev:worker": "NODE_ENV='development' pnpm tsx --watch src/server/tasks/worker.ts", "dev:worker": "NODE_ENV='development' pnpm tsx --watch src/server/tasks/worker.ts",
"dev": "concurrently --kill-others 'pnpm dev:next' 'pnpm dev:wss' 'pnpm dev:worker'", "dev": "concurrently --kill-others 'pnpm dev:next' 'pnpm dev:wss'",
"postinstall": "prisma generate", "postinstall": "prisma generate",
"lint": "next lint", "lint": "next lint",
"start": "next start", "start": "next start",
"codegen": "tsx src/codegen/export-client-types.ts", "codegen": "tsx src/codegen/export-openai-types.ts",
"seed": "tsx prisma/seed.ts", "seed": "tsx prisma/seed.ts",
"check": "concurrently 'pnpm lint' 'pnpm tsc' 'pnpm prettier . --check'" "check": "concurrently 'pnpm lint' 'pnpm tsc' 'pnpm prettier . --check'"
}, },
"dependencies": { "dependencies": {
"@anthropic-ai/sdk": "^0.5.8",
"@apidevtools/json-schema-ref-parser": "^10.1.0", "@apidevtools/json-schema-ref-parser": "^10.1.0",
"@babel/preset-typescript": "^7.22.5", "@babel/preset-typescript": "^7.22.5",
"@babel/standalone": "^7.22.9", "@babel/standalone": "^7.22.9",
@@ -60,8 +59,6 @@
"lodash-es": "^4.17.21", "lodash-es": "^4.17.21",
"next": "^13.4.2", "next": "^13.4.2",
"next-auth": "^4.22.1", "next-auth": "^4.22.1",
"next-query-params": "^4.2.3",
"nextjs-cors": "^2.1.2",
"nextjs-routes": "^2.0.1", "nextjs-routes": "^2.0.1",
"openai": "4.0.0-beta.2", "openai": "4.0.0-beta.2",
"pluralize": "^8.0.0", "pluralize": "^8.0.0",
@@ -76,15 +73,11 @@
"react-syntax-highlighter": "^15.5.0", "react-syntax-highlighter": "^15.5.0",
"react-textarea-autosize": "^8.5.0", "react-textarea-autosize": "^8.5.0",
"recast": "^0.23.3", "recast": "^0.23.3",
"replicate": "^0.12.3",
"socket.io": "^4.7.1", "socket.io": "^4.7.1",
"socket.io-client": "^4.7.1", "socket.io-client": "^4.7.1",
"superjson": "1.12.2", "superjson": "1.12.2",
"trpc-openapi": "^1.2.0",
"tsx": "^3.12.7", "tsx": "^3.12.7",
"type-fest": "^4.0.0", "type-fest": "^4.0.0",
"use-query-params": "^2.2.1",
"uuid": "^9.0.0",
"vite-tsconfig-paths": "^4.2.0", "vite-tsconfig-paths": "^4.2.0",
"zod": "^3.21.4", "zod": "^3.21.4",
"zustand": "^4.3.9" "zustand": "^4.3.9"
@@ -105,7 +98,6 @@
"@types/react": "^18.2.6", "@types/react": "^18.2.6",
"@types/react-dom": "^18.2.4", "@types/react-dom": "^18.2.4",
"@types/react-syntax-highlighter": "^15.5.7", "@types/react-syntax-highlighter": "^15.5.7",
"@types/uuid": "^9.0.2",
"@typescript-eslint/eslint-plugin": "^5.59.6", "@typescript-eslint/eslint-plugin": "^5.59.6",
"@typescript-eslint/parser": "^5.59.6", "@typescript-eslint/parser": "^5.59.6",
"eslint": "^8.40.0", "eslint": "^8.40.0",

View File

@@ -1,13 +1,10 @@
lockfileVersion: '6.0' lockfileVersion: '6.1'
settings: settings:
autoInstallPeers: true autoInstallPeers: true
excludeLinksFromLockfile: false excludeLinksFromLockfile: false
dependencies: dependencies:
'@anthropic-ai/sdk':
specifier: ^0.5.8
version: 0.5.8
'@apidevtools/json-schema-ref-parser': '@apidevtools/json-schema-ref-parser':
specifier: ^10.1.0 specifier: ^10.1.0
version: 10.1.0 version: 10.1.0
@@ -122,12 +119,6 @@ dependencies:
next-auth: next-auth:
specifier: ^4.22.1 specifier: ^4.22.1
version: 4.22.1(next@13.4.2)(react-dom@18.2.0)(react@18.2.0) version: 4.22.1(next@13.4.2)(react-dom@18.2.0)(react@18.2.0)
next-query-params:
specifier: ^4.2.3
version: 4.2.3(next@13.4.2)(react@18.2.0)(use-query-params@2.2.1)
nextjs-cors:
specifier: ^2.1.2
version: 2.1.2(next@13.4.2)
nextjs-routes: nextjs-routes:
specifier: ^2.0.1 specifier: ^2.0.1
version: 2.0.1(next@13.4.2) version: 2.0.1(next@13.4.2)
@@ -170,9 +161,6 @@ dependencies:
recast: recast:
specifier: ^0.23.3 specifier: ^0.23.3
version: 0.23.3 version: 0.23.3
replicate:
specifier: ^0.12.3
version: 0.12.3
socket.io: socket.io:
specifier: ^4.7.1 specifier: ^4.7.1
version: 4.7.1 version: 4.7.1
@@ -182,21 +170,12 @@ dependencies:
superjson: superjson:
specifier: 1.12.2 specifier: 1.12.2
version: 1.12.2 version: 1.12.2
trpc-openapi:
specifier: ^1.2.0
version: 1.2.0(@trpc/server@10.26.0)(zod@3.21.4)
tsx: tsx:
specifier: ^3.12.7 specifier: ^3.12.7
version: 3.12.7 version: 3.12.7
type-fest: type-fest:
specifier: ^4.0.0 specifier: ^4.0.0
version: 4.0.0 version: 4.0.0
use-query-params:
specifier: ^2.2.1
version: 2.2.1(react-dom@18.2.0)(react@18.2.0)
uuid:
specifier: ^9.0.0
version: 9.0.0
vite-tsconfig-paths: vite-tsconfig-paths:
specifier: ^4.2.0 specifier: ^4.2.0
version: 4.2.0(typescript@5.0.4) version: 4.2.0(typescript@5.0.4)
@@ -253,9 +232,6 @@ devDependencies:
'@types/react-syntax-highlighter': '@types/react-syntax-highlighter':
specifier: ^15.5.7 specifier: ^15.5.7
version: 15.5.7 version: 15.5.7
'@types/uuid':
specifier: ^9.0.2
version: 9.0.2
'@typescript-eslint/eslint-plugin': '@typescript-eslint/eslint-plugin':
specifier: ^5.59.6 specifier: ^5.59.6
version: 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.40.0)(typescript@5.0.4) version: 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.40.0)(typescript@5.0.4)
@@ -307,22 +283,6 @@ packages:
'@jridgewell/gen-mapping': 0.3.3 '@jridgewell/gen-mapping': 0.3.3
'@jridgewell/trace-mapping': 0.3.18 '@jridgewell/trace-mapping': 0.3.18
/@anthropic-ai/sdk@0.5.8:
resolution: {integrity: sha512-iHenjcE2Q/az6VZiP1DueOSvKNRmxsly6Rx2yjJBoy7OBYVFGVjEdgs2mPQHtTX0ibKAR7tPq6F6MQbKDPWcKg==}
dependencies:
'@types/node': 18.16.0
'@types/node-fetch': 2.6.4
abort-controller: 3.0.0
agentkeepalive: 4.3.0
digest-fetch: 1.3.0
form-data-encoder: 1.7.2
formdata-node: 4.4.1
node-fetch: 2.6.12
transitivePeerDependencies:
- encoding
- supports-color
dev: false
/@apidevtools/json-schema-ref-parser@10.1.0: /@apidevtools/json-schema-ref-parser@10.1.0:
resolution: {integrity: sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==} resolution: {integrity: sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==}
engines: {node: '>= 16'} engines: {node: '>= 16'}
@@ -3055,10 +3015,6 @@ packages:
resolution: {integrity: sha512-cputDpIbFgLUaGQn6Vqg3/YsJwxUwHLO13v3i5ouxT4lat0khip9AEWxtERujXV9wxIB1EyF97BSJFt6vpdI8g==} resolution: {integrity: sha512-cputDpIbFgLUaGQn6Vqg3/YsJwxUwHLO13v3i5ouxT4lat0khip9AEWxtERujXV9wxIB1EyF97BSJFt6vpdI8g==}
dev: false dev: false
/@types/uuid@9.0.2:
resolution: {integrity: sha512-kNnC1GFBLuhImSnV7w4njQkUiJi0ZXUycu1rUaouPqiKlXkh77JKgdRnTAp1x5eBwcIwbtI+3otwzuIDEuDoxQ==}
dev: true
/@typescript-eslint/eslint-plugin@5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.40.0)(typescript@5.0.4): /@typescript-eslint/eslint-plugin@5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.40.0)(typescript@5.0.4):
resolution: {integrity: sha512-sXtOgJNEuRU5RLwPUb1jxtToZbgvq3M6FPpY4QENxoOggK+UpTxUBpj6tD8+Qh2g46Pi9We87E+eHnUw8YcGsw==} resolution: {integrity: sha512-sXtOgJNEuRU5RLwPUb1jxtToZbgvq3M6FPpY4QENxoOggK+UpTxUBpj6tD8+Qh2g46Pi9We87E+eHnUw8YcGsw==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@@ -3867,15 +3823,6 @@ packages:
wrap-ansi: 7.0.0 wrap-ansi: 7.0.0
dev: false dev: false
/co-body@6.1.0:
resolution: {integrity: sha512-m7pOT6CdLN7FuXUcpuz/8lfQ/L77x8SchHCF4G0RBTJO20Wzmhn5Sp4/5WsKy8OSpifBSUrmg83qEqaDHdyFuQ==}
dependencies:
inflation: 2.0.0
qs: 6.11.2
raw-body: 2.5.1
type-is: 1.6.18
dev: false
/color-convert@1.9.3: /color-convert@1.9.3:
resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==}
dependencies: dependencies:
@@ -3950,10 +3897,6 @@ packages:
/convert-source-map@1.9.0: /convert-source-map@1.9.0:
resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==}
/cookie-es@1.0.0:
resolution: {integrity: sha512-mWYvfOLrfEc996hlKcdABeIiPHUPC6DM2QYZdGGOvhOTbA3tjm2eBwqlJpoFdjC89NI4Qt6h0Pu06Mp+1Pj5OQ==}
dev: false
/cookie-signature@1.0.6: /cookie-signature@1.0.6:
resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==}
dev: false dev: false
@@ -4156,20 +4099,11 @@ packages:
has-property-descriptors: 1.0.0 has-property-descriptors: 1.0.0
object-keys: 1.1.1 object-keys: 1.1.1
/defu@6.1.2:
resolution: {integrity: sha512-+uO4+qr7msjNNWKYPHqN/3+Dx3NFkmIzayk2L1MyZQlvgZb/J1A0fo410dpKrN2SnqFjt8n4JL8fDJE0wIgjFQ==}
dev: false
/delayed-stream@1.0.0: /delayed-stream@1.0.0:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'} engines: {node: '>=0.4.0'}
dev: false dev: false
/depd@1.1.2:
resolution: {integrity: sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==}
engines: {node: '>= 0.6'}
dev: false
/depd@2.0.0: /depd@2.0.0:
resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==}
engines: {node: '>= 0.8'} engines: {node: '>= 0.8'}
@@ -4180,10 +4114,6 @@ packages:
engines: {node: '>=6'} engines: {node: '>=6'}
dev: true dev: true
/destr@2.0.0:
resolution: {integrity: sha512-FJ9RDpf3GicEBvzI3jxc2XhHzbqD8p4ANw/1kPsFBfTvP1b7Gn/Lg1vO7R9J4IVgoMbyUmFrFGZafJ1hPZpvlg==}
dev: false
/destroy@1.2.0: /destroy@1.2.0:
resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==}
engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16}
@@ -5305,18 +5235,6 @@ packages:
- pg-native - pg-native
dev: false dev: false
/h3@1.7.1:
resolution: {integrity: sha512-A9V2NEDNHet7v1gCg7CMwerSigLi0SRbhTy7C3lGb0N4YKIpPmLDjedTUopqp4dnn7COHfqUjjaz3zbtz4QduA==}
dependencies:
cookie-es: 1.0.0
defu: 6.1.2
destr: 2.0.0
iron-webcrypto: 0.7.1
radix3: 1.0.1
ufo: 1.1.2
uncrypto: 0.1.3
dev: false
/has-bigints@1.0.2: /has-bigints@1.0.2:
resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==} resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==}
dev: true dev: true
@@ -5444,11 +5362,6 @@ packages:
engines: {node: '>=0.8.19'} engines: {node: '>=0.8.19'}
dev: true dev: true
/inflation@2.0.0:
resolution: {integrity: sha512-m3xv4hJYR2oXw4o4Y5l6P5P16WYmazYof+el6Al3f+YlggGj6qT9kImBAnzDelRALnP5d3h4jGBPKzYCizjZZw==}
engines: {node: '>= 0.8.0'}
dev: false
/inflight@1.0.6: /inflight@1.0.6:
resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
dependencies: dependencies:
@@ -5478,10 +5391,6 @@ packages:
engines: {node: '>= 0.10'} engines: {node: '>= 0.10'}
dev: false dev: false
/iron-webcrypto@0.7.1:
resolution: {integrity: sha512-K/UmlEhPCPXEHV5hAtH5C0tI5JnFuOrv4yO/j7ODPl3HaiiHBLbOLTde+ieUaAyfCATe4LoAnclyF+hmSCOVmQ==}
dev: false
/is-alphabetical@1.0.4: /is-alphabetical@1.0.4:
resolution: {integrity: sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==} resolution: {integrity: sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==}
dev: false dev: false
@@ -6125,19 +6034,6 @@ packages:
uuid: 8.3.2 uuid: 8.3.2
dev: false dev: false
/next-query-params@4.2.3(next@13.4.2)(react@18.2.0)(use-query-params@2.2.1):
resolution: {integrity: sha512-hGNCYRH8YyA5ItiBGSKrtMl21b2MAqfPkdI1mvwloNVqSU142IaGzqHN+OTovyeLIpQfonY01y7BAHb/UH4POg==}
peerDependencies:
next: ^10.0.0 || ^11.0.0 || ^12.0.0 || ^13.0.0
react: ^16.8.0 || ^17.0.0 || ^18.0.0
use-query-params: ^2.0.0
dependencies:
next: 13.4.2(@babel/core@7.22.9)(react-dom@18.2.0)(react@18.2.0)
react: 18.2.0
tslib: 2.6.0
use-query-params: 2.2.1(react-dom@18.2.0)(react@18.2.0)
dev: false
/next-tick@1.1.0: /next-tick@1.1.0:
resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==}
dev: false dev: false
@@ -6187,15 +6083,6 @@ packages:
- babel-plugin-macros - babel-plugin-macros
dev: false dev: false
/nextjs-cors@2.1.2(next@13.4.2):
resolution: {integrity: sha512-2yOVivaaf2ILe4f/qY32hnj3oC77VCOsUQJQfhVMGsXE/YMEWUY2zy78sH9FKUCM7eG42/l3pDofIzMD781XGA==}
peerDependencies:
next: ^8.1.1-canary.54 || ^9.0.0 || ^10.0.0-0 || ^11.0.0 || ^12.0.0 || ^13.0.0
dependencies:
cors: 2.8.5
next: 13.4.2(@babel/core@7.22.9)(react-dom@18.2.0)(react@18.2.0)
dev: false
/nextjs-routes@2.0.1(next@13.4.2): /nextjs-routes@2.0.1(next@13.4.2):
resolution: {integrity: sha512-pBGRm6uR44zwUjWWYn6+gwz08BhBbqUYlIzsbNHAh1TWohHYKWFaa2YVsj8BxEo726MZYg87OJPnHpaaY1ia0w==} resolution: {integrity: sha512-pBGRm6uR44zwUjWWYn6+gwz08BhBbqUYlIzsbNHAh1TWohHYKWFaa2YVsj8BxEo726MZYg87OJPnHpaaY1ia0w==}
hasBin: true hasBin: true
@@ -6223,22 +6110,6 @@ packages:
whatwg-url: 5.0.0 whatwg-url: 5.0.0
dev: false dev: false
/node-mocks-http@1.12.2:
resolution: {integrity: sha512-xhWwC0dh35R9rf0j3bRZXuISXdHxxtMx0ywZQBwjrg3yl7KpRETzogfeCamUIjltpn0Fxvs/ZhGJul1vPLrdJQ==}
engines: {node: '>=0.6'}
dependencies:
accepts: 1.3.8
content-disposition: 0.5.4
depd: 1.1.2
fresh: 0.5.2
merge-descriptors: 1.0.1
methods: 1.1.2
mime: 1.6.0
parseurl: 1.3.3
range-parser: 1.2.1
type-is: 1.6.18
dev: false
/node-releases@2.0.13: /node-releases@2.0.13:
resolution: {integrity: sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==} resolution: {integrity: sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==}
@@ -6400,10 +6271,6 @@ packages:
- supports-color - supports-color
dev: false dev: false
/openapi-types@12.1.3:
resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==}
dev: false
/openapi-typescript@5.4.1: /openapi-typescript@5.4.1:
resolution: {integrity: sha512-AGB2QiZPz4rE7zIwV3dRHtoUC/CWHhUjuzGXvtmMQN2AFV8xCTLKcZUHLcdPQmt/83i22nRE7+TxXOXkK+gf4Q==} resolution: {integrity: sha512-AGB2QiZPz4rE7zIwV3dRHtoUC/CWHhUjuzGXvtmMQN2AFV8xCTLKcZUHLcdPQmt/83i22nRE7+TxXOXkK+gf4Q==}
engines: {node: '>= 14.0.0'} engines: {node: '>= 14.0.0'}
@@ -6827,10 +6694,6 @@ packages:
resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
dev: true dev: true
/radix3@1.0.1:
resolution: {integrity: sha512-y+AcwZ3HcUIGc9zGsNVf5+BY/LxL+z+4h4J3/pp8jxSmy1STaCocPS3qrj4tA5ehUSzqtqK+0Aygvz/r/8vy4g==}
dev: false
/randombytes@2.1.0: /randombytes@2.1.0:
resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==}
dependencies: dependencies:
@@ -7125,11 +6988,6 @@ packages:
functions-have-names: 1.2.3 functions-have-names: 1.2.3
dev: true dev: true
/replicate@0.12.3:
resolution: {integrity: sha512-HVWKPoVhWVTONlWk+lUXmq9Vy2J8MxBJMtDBQq3dA5uq71ZzKTh0xvJfvzW4+VLBjhBeL7tkdua6hZJmKfzAPQ==}
engines: {git: '>=2.11.0', node: '>=16.6.0', npm: '>=7.19.0', yarn: '>=1.7.0'}
dev: false
/require-directory@2.1.1: /require-directory@2.1.1:
resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==}
engines: {node: '>=0.10.0'} engines: {node: '>=0.10.0'}
@@ -7281,10 +7139,6 @@ packages:
randombytes: 2.1.0 randombytes: 2.1.0
dev: true dev: true
/serialize-query-params@2.0.2:
resolution: {integrity: sha512-1chMo1dST4pFA9RDXAtF0Rbjaut4is7bzFbI1Z26IuMub68pNCILku85aYmeFhvnY//BXUPUhoRMjYcsT93J/Q==}
dev: false
/serve-static@1.15.0: /serve-static@1.15.0:
resolution: {integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==} resolution: {integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==}
engines: {node: '>= 0.8.0'} engines: {node: '>= 0.8.0'}
@@ -7731,22 +7585,6 @@ packages:
hasBin: true hasBin: true
dev: false dev: false
/trpc-openapi@1.2.0(@trpc/server@10.26.0)(zod@3.21.4):
resolution: {integrity: sha512-pfYoCd/3KYXWXvUPZBKJw455OOwngKN/6SIcj7Yit19OMLJ+8yVZkEvGEeg5wUSwfsiTdRsKuvqkRPXVSwV7ew==}
peerDependencies:
'@trpc/server': ^10.0.0
zod: ^3.14.4
dependencies:
'@trpc/server': 10.26.0
co-body: 6.1.0
h3: 1.7.1
lodash.clonedeep: 4.5.0
node-mocks-http: 1.12.2
openapi-types: 12.1.3
zod: 3.21.4
zod-to-json-schema: 3.21.4(zod@3.21.4)
dev: false
/tsconfck@2.1.2(typescript@5.0.4): /tsconfck@2.1.2(typescript@5.0.4):
resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==} resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==}
engines: {node: ^14.13.1 || ^16 || >=18} engines: {node: ^14.13.1 || ^16 || >=18}
@@ -7884,6 +7722,7 @@ packages:
/ufo@1.1.2: /ufo@1.1.2:
resolution: {integrity: sha512-TrY6DsjTQQgyS3E3dBaOXf0TpPD8u9FVrVYmKVegJuFw51n/YB9XPt+U6ydzFG5ZIN7+DIjPbNmXoBj9esYhgQ==} resolution: {integrity: sha512-TrY6DsjTQQgyS3E3dBaOXf0TpPD8u9FVrVYmKVegJuFw51n/YB9XPt+U6ydzFG5ZIN7+DIjPbNmXoBj9esYhgQ==}
dev: true
/unbox-primitive@1.0.2: /unbox-primitive@1.0.2:
resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==}
@@ -7894,10 +7733,6 @@ packages:
which-boxed-primitive: 1.0.2 which-boxed-primitive: 1.0.2
dev: true dev: true
/uncrypto@0.1.3:
resolution: {integrity: sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q==}
dev: false
/undici@5.22.1: /undici@5.22.1:
resolution: {integrity: sha512-Ji2IJhFXZY0x/0tVBXeQwgPlLWw13GVzpsWPQ3rV50IFMMof2I55PZZxtm4P6iNq+L5znYN9nSTAq0ZyE6lSJw==} resolution: {integrity: sha512-Ji2IJhFXZY0x/0tVBXeQwgPlLWw13GVzpsWPQ3rV50IFMMof2I55PZZxtm4P6iNq+L5znYN9nSTAq0ZyE6lSJw==}
engines: {node: '>=14.0'} engines: {node: '>=14.0'}
@@ -7981,24 +7816,6 @@ packages:
use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.6)(react@18.2.0) use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.6)(react@18.2.0)
dev: false dev: false
/use-query-params@2.2.1(react-dom@18.2.0)(react@18.2.0):
resolution: {integrity: sha512-i6alcyLB8w9i3ZK3caNftdb+UnbfBRNPDnc89CNQWkGRmDrm/gfydHvMBfVsQJRq3NoHOM2dt/ceBWG2397v1Q==}
peerDependencies:
'@reach/router': ^1.2.1
react: '>=16.8.0'
react-dom: '>=16.8.0'
react-router-dom: '>=5'
peerDependenciesMeta:
'@reach/router':
optional: true
react-router-dom:
optional: true
dependencies:
react: 18.2.0
react-dom: 18.2.0(react@18.2.0)
serialize-query-params: 2.0.2
dev: false
/use-sidecar@1.1.2(@types/react@18.2.6)(react@18.2.0): /use-sidecar@1.1.2(@types/react@18.2.6)(react@18.2.0):
resolution: {integrity: sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==} resolution: {integrity: sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==}
engines: {node: '>=10'} engines: {node: '>=10'}
@@ -8047,11 +7864,6 @@ packages:
hasBin: true hasBin: true
dev: false dev: false
/uuid@9.0.0:
resolution: {integrity: sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==}
hasBin: true
dev: false
/vary@1.1.2: /vary@1.1.2:
resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==}
engines: {node: '>= 0.8'} engines: {node: '>= 0.8'}
@@ -8410,14 +8222,6 @@ packages:
engines: {node: '>=12.20'} engines: {node: '>=12.20'}
dev: true dev: true
/zod-to-json-schema@3.21.4(zod@3.21.4):
resolution: {integrity: sha512-fjUZh4nQ1s6HMccgIeE0VP4QG/YRGPmyjO9sAh890aQKPEk3nqbfUXhMFaC+Dr5KvYBm8BCyvfpZf2jY9aGSsw==}
peerDependencies:
zod: ^3.21.4
dependencies:
zod: 3.21.4
dev: false
/zod@3.21.4: /zod@3.21.4:
resolution: {integrity: sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==} resolution: {integrity: sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==}
dev: false dev: false

View File

@@ -19,16 +19,13 @@ model Experiment {
organizationId String @db.Uuid organizationId String @db.Uuid
organization Organization? @relation(fields: [organizationId], references: [id], onDelete: Cascade) organization Organization? @relation(fields: [organizationId], references: [id], onDelete: Cascade)
dataFlowId String? @db.Uuid
dataFlow DataFlow? @relation(fields: [dataFlowId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now()) createdAt DateTime @default(now())
updatedAt DateTime @updatedAt updatedAt DateTime @updatedAt
templateVariables TemplateVariable[] TemplateVariable TemplateVariable[]
promptVariants PromptVariant[] PromptVariant PromptVariant[]
testScenarios TestScenario[] TestScenario TestScenario[]
evaluations Evaluation[] Evaluation Evaluation[]
} }
model PromptVariant { model PromptVariant {
@@ -93,11 +90,13 @@ enum CellRetrievalStatus {
model ScenarioVariantCell { model ScenarioVariantCell {
id String @id @default(uuid()) @db.Uuid id String @id @default(uuid()) @db.Uuid
retrievalStatus CellRetrievalStatus @default(COMPLETE) statusCode Int?
jobQueuedAt DateTime? errorMessage String?
jobStartedAt DateTime? retryTime DateTime?
modelResponses ModelResponse[] streamingChannel String?
errorMessage String? // Contains errors that occurred independently of model responses retrievalStatus CellRetrievalStatus @default(COMPLETE)
modelOutput ModelOutput?
promptVariantId String @db.Uuid promptVariantId String @db.Uuid
promptVariant PromptVariant @relation(fields: [promptVariantId], references: [id], onDelete: Cascade) promptVariant PromptVariant @relation(fields: [promptVariantId], references: [id], onDelete: Cascade)
@@ -112,31 +111,24 @@ model ScenarioVariantCell {
@@unique([promptVariantId, testScenarioId]) @@unique([promptVariantId, testScenarioId])
} }
model ModelResponse { model ModelOutput {
id String @id @default(uuid()) @db.Uuid id String @id @default(uuid()) @db.Uuid
inputHash String inputHash String
requestedAt DateTime? output Json
receivedAt DateTime? timeToComplete Int @default(0)
output Json? cost Float?
cost Float? promptTokens Int?
promptTokens Int? completionTokens Int?
completionTokens Int?
statusCode Int?
errorMessage String?
retryTime DateTime?
outdated Boolean @default(false)
createdAt DateTime @default(now()) createdAt DateTime @default(now())
updatedAt DateTime @updatedAt updatedAt DateTime @updatedAt
scenarioVariantCellId String? @db.Uuid scenarioVariantCellId String @db.Uuid
scenarioVariantCell ScenarioVariantCell? @relation(fields: [scenarioVariantCellId], references: [id], onDelete: Cascade) scenarioVariantCell ScenarioVariantCell @relation(fields: [scenarioVariantCellId], references: [id], onDelete: Cascade)
outputEvaluations OutputEvaluation[] outputEvaluation OutputEvaluation[]
loggedCallId String? @db.Uuid
loggedCall LoggedCall? @relation(fields: [loggedCallId], references: [id], onDelete: Cascade)
@@unique([scenarioVariantCellId])
@@index([inputHash]) @@index([inputHash])
} }
@@ -158,7 +150,7 @@ model Evaluation {
createdAt DateTime @default(now()) createdAt DateTime @default(now())
updatedAt DateTime @updatedAt updatedAt DateTime @updatedAt
outputEvaluations OutputEvaluation[] OutputEvaluation OutputEvaluation[]
} }
model OutputEvaluation { model OutputEvaluation {
@@ -168,8 +160,8 @@ model OutputEvaluation {
result Float result Float
details String? details String?
modelResponseId String @db.Uuid modelOutputId String @db.Uuid
modelResponse ModelResponse @relation(fields: [modelResponseId], references: [id], onDelete: Cascade) modelOutput ModelOutput @relation(fields: [modelOutputId], references: [id], onDelete: Cascade)
evaluationId String @db.Uuid evaluationId String @db.Uuid
evaluation Evaluation @relation(fields: [evaluationId], references: [id], onDelete: Cascade) evaluation Evaluation @relation(fields: [evaluationId], references: [id], onDelete: Cascade)
@@ -177,65 +169,18 @@ model OutputEvaluation {
createdAt DateTime @default(now()) createdAt DateTime @default(now())
updatedAt DateTime @updatedAt updatedAt DateTime @updatedAt
@@unique([modelResponseId, evaluationId]) @@unique([modelOutputId, evaluationId])
}
model LoggedCall {
id String @id @default(uuid()) @db.Uuid
promptFunctionHash String
promptFunction String
prompt Json
responsePayload Json?
scenarioVariables Json
model String
modelProvider String
dataFlowId String @db.Uuid
dataFlow DataFlow @relation(fields: [dataFlowId], references: [id], onDelete: Cascade)
modelResponse ModelResponse[]
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
model DataFlow {
id String @id @default(uuid()) @db.Uuid
label String
experiments Experiment[]
loggedCalls LoggedCall[]
organizationId String @db.Uuid
organization Organization? @relation(fields: [organizationId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
model ApiKey {
id String @id @default(uuid()) @db.Uuid
name String
key String @unique
organizationId String @db.Uuid
organization Organization? @relation(fields: [organizationId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
} }
model Organization { model Organization {
id String @id @default(uuid()) @db.Uuid id String @id @default(uuid()) @db.Uuid
personalOrgUserId String? @unique @db.Uuid personalOrgUserId String? @unique @db.Uuid
personalOrgUser User? @relation(fields: [personalOrgUserId], references: [id], onDelete: Cascade) PersonalOrgUser User? @relation(fields: [personalOrgUserId], references: [id], onDelete: Cascade)
apiKeys ApiKey[]
dataFlows DataFlow[]
createdAt DateTime @default(now()) createdAt DateTime @default(now())
updatedAt DateTime @updatedAt updatedAt DateTime @updatedAt
organizationUsers OrganizationUser[] OrganizationUser OrganizationUser[]
experiments Experiment[] Experiment Experiment[]
} }
enum OrganizationUserRole { enum OrganizationUserRole {
@@ -289,15 +234,15 @@ model Session {
} }
model User { model User {
id String @id @default(uuid()) @db.Uuid id String @id @default(uuid()) @db.Uuid
name String? name String?
email String? @unique email String? @unique
emailVerified DateTime? emailVerified DateTime?
image String? image String?
accounts Account[] accounts Account[]
sessions Session[] sessions Session[]
organizationUsers OrganizationUser[] OrganizationUser OrganizationUser[]
organizations Organization[] Organization Organization[]
} }
model VerificationToken { model VerificationToken {
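
On the ModelOutput side of this schema, the one-to-one cell/output relation reads roughly as follows. A minimal sketch assuming a generated Prisma client, a top-level-await context, and placeholder IDs:

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Look a cell up by its compound unique key and include its single output.
const cell = await prisma.scenarioVariantCell.findUnique({
  where: {
    promptVariantId_testScenarioId: {
      promptVariantId: "11111111-1111-1111-1111-111111111111", // placeholder
      testScenarioId: "22222222-2222-2222-2222-222222222222", // placeholder
    },
  },
  include: { modelOutput: true },
});
console.log(cell?.modelOutput?.output);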

View File

@@ -7,13 +7,9 @@ const defaultId = "11111111-1111-1111-1111-111111111111";
await prisma.organization.deleteMany({ await prisma.organization.deleteMany({
where: { id: defaultId }, where: { id: defaultId },
}); });
await prisma.organization.create({
// If there's an existing org, just seed into it data: { id: defaultId },
const org = });
(await prisma.organization.findFirst({})) ??
(await prisma.organization.create({
data: { id: defaultId },
}));
await prisma.experiment.deleteMany({ await prisma.experiment.deleteMany({
where: { where: {
@@ -25,7 +21,7 @@ await prisma.experiment.create({
data: { data: {
id: defaultId, id: defaultId,
label: "Country Capitals Example", label: "Country Capitals Example",
organizationId: org.id, organizationId: defaultId,
}, },
}); });
@@ -107,41 +103,30 @@ await prisma.testScenario.deleteMany({
}, },
}); });
const countries = [
"Afghanistan",
"Albania",
"Algeria",
"Andorra",
"Angola",
"Antigua and Barbuda",
"Argentina",
"Armenia",
"Australia",
"Austria",
"Austrian Empire",
"Azerbaijan",
"Baden",
"Bahamas, The",
"Bahrain",
"Bangladesh",
"Barbados",
"Bavaria",
"Belarus",
"Belgium",
"Belize",
"Benin (Dahomey)",
"Bolivia",
"Bosnia and Herzegovina",
"Botswana",
];
await prisma.testScenario.createMany({ await prisma.testScenario.createMany({
data: countries.map((country, i) => ({ data: [
experimentId: defaultId, {
sortIndex: i, experimentId: defaultId,
variableValues: { sortIndex: 0,
country: country, variableValues: {
country: "Spain",
},
}, },
})), {
experimentId: defaultId,
sortIndex: 1,
variableValues: {
country: "USA",
},
},
{
experimentId: defaultId,
sortIndex: 2,
variableValues: {
country: "Chile",
},
},
],
}); });
const variants = await prisma.promptVariant.findMany({ const variants = await prisma.promptVariant.findMany({
@@ -164,5 +149,5 @@ await Promise.all(
testScenarioId: scenario.id, testScenarioId: scenario.id,
})), })),
) )
.map((cell) => generateNewCell(cell.promptVariantId, cell.testScenarioId, { stream: false })), .map((cell) => generateNewCell(cell.promptVariantId, cell.testScenarioId)),
); );

View File

(Binary image changed: 15 KiB before, 15 KiB after; dimensions not shown.)

Some files were not shown because too many files have changed in this diff.