Compare commits
27 Commits
catch-reje ... examples
| SHA1 |
|---|
| db69b8e496 |
| 38e28fa30a |
| b4cb931f6c |
| 40638a7848 |
| 14eae45d18 |
| 13bac46e0b |
| 12d01cd3d5 |
| ec59252010 |
| 87e2339df2 |
| 75ad6619a5 |
| 4b8941d53a |
| 0d691d17cc |
| 815d4faad2 |
| 9632ccbc71 |
| a4131e4a10 |
| db1c8f171d |
| 678392ef17 |
| af722128e8 |
| 50a79b6e3a |
| f59150ff5b |
| b58e0a8d54 |
| dc82a3fa82 |
| fedbf5784e |
| 888c04af50 |
| 1b36453051 |
| 2f37b3ed87 |
| 17866a5249 |
.gitignore (vendored, 1 change):

@@ -3,3 +3,4 @@
 *.pyc
 node_modules/
 *.tsbuildinfo
+dist/
README.md (18 changes):

@@ -1,10 +1,8 @@
 <!-- <img src="https://github.com/openpipe/openpipe/assets/41524992/ca59596e-eb80-40f9-921f-6d67f6e6d8fa" width="72px" /> -->

 # OpenPipe

 OpenPipe is a flexible playground for comparing and optimizing LLM prompts. It lets you quickly generate, test and compare candidate prompts, and can automatically [translate](#-translate-between-model-apis) those prompts between models.

-<img src="https://github.com/openpipe/openpipe/assets/41524992/219a844e-3f4e-4f6b-8066-41348b42977b" alt="demo">
+<img src="https://github.com/openpipe/openpipe/assets/41524992/66bb1843-cb72-4130-a369-eec2df3b8201" alt="demo">

 You can use our hosted version of OpenPipe at https://openpipe.ai. You can also clone this repository and [run it locally](#running-locally).

@@ -37,25 +35,19 @@ OpenPipe lets you _template_ a prompt. Use the templating feature to run the pro

 Write your prompt in one format and automatically convert it to work with any other model.

-<img width="480" alt="Screenshot 2023-08-01 at 11 55 38 PM" src="https://github.com/OpenPipe/OpenPipe/assets/41524992/1e19ccf2-96b6-4e93-a3a5-1449710d1b5b" alt="translate between models">
+<br><br>
+<!-- <img width="480" alt="Screenshot 2023-08-01 at 11 55 38 PM" src="https://github.com/OpenPipe/OpenPipe/assets/41524992/1e19ccf2-96b6-4e93-a3a5-1449710d1b5b" alt="translate between models"> -->

 ### 🛠️ Refine Your Prompts Automatically

 Use a growing database of best-practice refinements to improve your prompts automatically.

-<img width="480" alt="Screenshot 2023-08-01 at 11 55 38 PM" src="https://github.com/OpenPipe/OpenPipe/assets/41524992/87a27fe7-daef-445c-a5e2-1c82b23f9f99" alt="add function call">
+<br><br>
+<!-- <img width="480" alt="Screenshot 2023-08-01 at 11 55 38 PM" src="https://github.com/OpenPipe/OpenPipe/assets/41524992/87a27fe7-daef-445c-a5e2-1c82b23f9f99" alt="add function call"> -->

 ### 🪄 Auto-generate Test Scenarios

 OpenPipe includes a tool to generate new test scenarios based on your existing prompts and scenarios. Just click "Autogenerate Scenario" to try it out!

-<img width="600" src="https://github.com/openpipe/openpipe/assets/41524992/219a844e-3f4e-4f6b-8066-41348b42977b" alt="auto-generate">
+<br><br>
+<!-- <img width="600" src="https://github.com/openpipe/openpipe/assets/41524992/219a844e-3f4e-4f6b-8066-41348b42977b" alt="auto-generate"> -->

 ## Running Locally

@@ -75,4 +67,4 @@ OpenPipe includes a tool to generate new test scenarios based on your existing p

 1. Copy your `.env` file to `.env.test`.
 2. Update the `DATABASE_URL` to have a different database name than your development one.
 3. Run `DATABASE_URL=[your new database url] pnpm prisma migrate dev --skip-seed --skip-generate`
 4. Run `pnpm test`
app/@types/nextjs-routes.d.ts (vendored, 3 changes):

@@ -19,10 +19,9 @@ declare module "nextjs-routes" {
     | DynamicRoute<"/api/v1/[...trpc]", { "trpc": string[] }>
     | StaticRoute<"/api/v1/openapi">
     | StaticRoute<"/dashboard">
-    | DynamicRoute<"/data/[id]", { "id": string }>
-    | StaticRoute<"/data">
     | DynamicRoute<"/experiments/[experimentSlug]", { "experimentSlug": string }>
     | StaticRoute<"/experiments">
+    | StaticRoute<"/fine-tunes">
     | StaticRoute<"/">
     | DynamicRoute<"/invitations/[invitationToken]", { "invitationToken": string }>
     | StaticRoute<"/project/settings">
Dockerfile:

@@ -23,7 +23,6 @@ ARG NEXT_PUBLIC_SOCKET_URL
 ARG NEXT_PUBLIC_HOST
 ARG NEXT_PUBLIC_SENTRY_DSN
 ARG SENTRY_AUTH_TOKEN
-ARG NEXT_PUBLIC_FF_SHOW_LOGGED_CALLS

 WORKDIR /code
 COPY --from=deps /code/node_modules ./node_modules
package.json:

@@ -48,6 +48,7 @@
     "@trpc/react-query": "^10.26.0",
     "@trpc/server": "^10.26.0",
     "@vercel/og": "^0.5.9",
+    "archiver": "^6.0.0",
     "ast-types": "^0.14.2",
     "chroma-js": "^2.4.2",
     "concurrently": "^8.2.0",
@@ -60,6 +61,7 @@
     "framer-motion": "^10.12.17",
     "gpt-tokens": "^1.0.10",
     "graphile-worker": "^0.13.0",
+    "human-id": "^4.0.0",
     "immer": "^10.0.2",
     "isolated-vm": "^4.5.0",
     "json-schema-to-typescript": "^13.0.2",
@@ -98,6 +100,7 @@
     "replicate": "^0.12.3",
     "socket.io": "^4.7.1",
     "socket.io-client": "^4.7.1",
+    "stream-buffers": "^3.0.2",
     "superjson": "1.12.2",
     "trpc-openapi": "^1.2.0",
     "tsx": "^3.12.7",
@@ -110,6 +113,7 @@
   },
   "devDependencies": {
     "@openapi-contrib/openapi-schema-to-json-schema": "^4.0.5",
+    "@types/archiver": "^5.3.2",
     "@types/babel__core": "^7.20.1",
     "@types/babel__standalone": "^7.1.4",
     "@types/chroma-js": "^2.4.0",
@@ -126,6 +130,7 @@
     "@types/react": "^18.2.6",
     "@types/react-dom": "^18.2.4",
     "@types/react-syntax-highlighter": "^15.5.7",
+    "@types/stream-buffers": "^3.0.4",
     "@types/uuid": "^9.0.2",
     "@typescript-eslint/eslint-plugin": "^5.59.6",
     "@typescript-eslint/parser": "^5.59.6",
New SQL migration file:

@@ -0,0 +1,48 @@
/*
  Warnings:

  - You are about to drop the column `input` on the `DatasetEntry` table. All the data in the column will be lost.
  - You are about to drop the column `output` on the `DatasetEntry` table. All the data in the column will be lost.
  - Added the required column `loggedCallId` to the `DatasetEntry` table without a default value. This is not possible if the table is not empty.

*/
-- AlterTable
ALTER TABLE "DatasetEntry" DROP COLUMN "input",
DROP COLUMN "output",
ADD COLUMN "loggedCallId" UUID NOT NULL;

-- AddForeignKey
ALTER TABLE "DatasetEntry" ADD CONSTRAINT "DatasetEntry_loggedCallId_fkey" FOREIGN KEY ("loggedCallId") REFERENCES "LoggedCall"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AlterTable
ALTER TABLE "LoggedCallModelResponse" ALTER COLUMN "cost" SET DATA TYPE DOUBLE PRECISION;

-- CreateEnum
CREATE TYPE "FineTuneStatus" AS ENUM ('PENDING', 'TRAINING', 'AWAITING_DEPLOYMENT', 'DEPLOYING', 'DEPLOYED', 'ERROR');

-- CreateTable
CREATE TABLE "FineTune" (
    "id" UUID NOT NULL,
    "slug" TEXT NOT NULL,
    "baseModel" TEXT NOT NULL,
    "status" "FineTuneStatus" NOT NULL DEFAULT 'PENDING',
    "trainingStartedAt" TIMESTAMP(3),
    "trainingFinishedAt" TIMESTAMP(3),
    "deploymentStartedAt" TIMESTAMP(3),
    "deploymentFinishedAt" TIMESTAMP(3),
    "datasetId" UUID NOT NULL,
    "projectId" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,

    CONSTRAINT "FineTune_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "FineTune_slug_key" ON "FineTune"("slug");

-- AddForeignKey
ALTER TABLE "FineTune" ADD CONSTRAINT "FineTune_datasetId_fkey" FOREIGN KEY ("datasetId") REFERENCES "Dataset"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "FineTune" ADD CONSTRAINT "FineTune_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE;
schema.prisma:

@@ -181,6 +181,7 @@ model Dataset {

   name           String
   datasetEntries DatasetEntry[]
+  fineTunes      FineTune[]

   projectId String  @db.Uuid
   project   Project @relation(fields: [projectId], references: [id], onDelete: Cascade)
@@ -192,8 +193,8 @@ model Dataset {
 model DatasetEntry {
   id String @id @default(uuid()) @db.Uuid

-  input  String
-  output String?
+  loggedCallId String     @db.Uuid
+  loggedCall   LoggedCall @relation(fields: [loggedCallId], references: [id], onDelete: Cascade)

   datasetId String   @db.Uuid
   dataset   Dataset? @relation(fields: [datasetId], references: [id], onDelete: Cascade)
@@ -216,6 +217,7 @@ model Project {
   experiments Experiment[]
   datasets    Dataset[]
   loggedCalls LoggedCall[]
+  fineTunes   FineTune[]
   apiKeys     ApiKey[]
 }

@@ -276,8 +278,9 @@ model LoggedCall {
   projectId String   @db.Uuid
   project   Project? @relation(fields: [projectId], references: [id], onDelete: Cascade)

   model          String?
   tags           LoggedCallTag[]
+  datasetEntries DatasetEntry[]

   createdAt DateTime @default(now())
   updatedAt DateTime @updatedAt
@@ -312,7 +315,7 @@ model LoggedCallModelResponse {
   outputTokens Int?
   finishReason String?
   completionId String?
-  cost         Decimal? @db.Decimal(18, 12)
+  cost         Float?

   // The LoggedCall that created this LoggedCallModelResponse
   originalLoggedCallId String @unique @db.Uuid
@@ -427,3 +430,33 @@ model VerificationToken {

   @@unique([identifier, token])
 }
+
+enum FineTuneStatus {
+  PENDING
+  TRAINING
+  AWAITING_DEPLOYMENT
+  DEPLOYING
+  DEPLOYED
+  ERROR
+}
+
+model FineTune {
+  id String @id @default(uuid()) @db.Uuid
+
+  slug      String         @unique
+  baseModel String
+  status    FineTuneStatus @default(PENDING)
+
+  trainingStartedAt    DateTime?
+  trainingFinishedAt   DateTime?
+  deploymentStartedAt  DateTime?
+  deploymentFinishedAt DateTime?
+
+  datasetId String  @db.Uuid
+  dataset   Dataset @relation(fields: [datasetId], references: [id], onDelete: Cascade)
+
+  projectId String  @db.Uuid
+  project   Project @relation(fields: [projectId], references: [id], onDelete: Cascade)
+
+  createdAt DateTime @default(now())
+  updatedAt DateTime @updatedAt
+}
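To make the new schema concrete, here is a minimal sketch of creating a FineTune row through the generated Prisma client. The helper function and the example base model value are illustrative assumptions; only the model fields themselves come from this diff.

```ts
// Hypothetical usage sketch for the new FineTune model; not part of this diff.
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// `status` defaults to PENDING and `slug` must be unique, per the schema above.
async function createFineTune(projectId: string, datasetId: string, slug: string) {
  return prisma.fineTune.create({
    data: {
      slug,
      baseModel: "llama2-7b", // example value; FineTuneButton.tsx below lists the supported options
      project: { connect: { id: projectId } },
      dataset: { connect: { id: datasetId } },
    },
  });
}
```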
app/src/components/InfoCircle.tsx (new file, 14 lines):

@@ -0,0 +1,14 @@
import { Tooltip, Icon, VStack } from "@chakra-ui/react";
import { RiInformationFill } from "react-icons/ri";

const InfoCircle = ({ tooltipText }: { tooltipText: string }) => {
  return (
    <Tooltip label={tooltipText} fontSize="sm" shouldWrapChildren maxW={80}>
      <VStack>
        <Icon as={RiInformationFill} boxSize={5} color="gray.500" />
      </VStack>
    </Tooltip>
  );
};

export default InfoCircle;
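InfoCircle wraps Chakra's Tooltip around an information icon so a short hint can sit next to any label. ExportButton.tsx later in this diff uses it exactly this way:

```tsx
<HStack w={48} alignItems="center" spacing={1}>
  <Text fontWeight="bold">Testing Split:</Text>
  <InfoCircle tooltipText="The percent of your logs that will be reserved for testing and saved in another file. Logs are split randomly." />
</HStack>
```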
InputDropdown.tsx:

@@ -11,6 +11,7 @@ import {
   Button,
   Text,
   useDisclosure,
+  type InputGroupProps,
 } from "@chakra-ui/react";

 import { FiChevronDown } from "react-icons/fi";
@@ -20,15 +21,25 @@ type InputDropdownProps<T> = {
   options: ReadonlyArray<T>;
   selectedOption: T;
   onSelect: (option: T) => void;
+  inputGroupProps?: InputGroupProps;
 };

-const InputDropdown = <T,>({ options, selectedOption, onSelect }: InputDropdownProps<T>) => {
+const InputDropdown = <T,>({
+  options,
+  selectedOption,
+  onSelect,
+  inputGroupProps,
+}: InputDropdownProps<T>) => {
   const popover = useDisclosure();

   return (
     <Popover placement="bottom-start" {...popover}>
       <PopoverTrigger>
-        <InputGroup cursor="pointer" w={(selectedOption as string).length * 14 + 180}>
+        <InputGroup
+          cursor="pointer"
+          w={(selectedOption as string).length * 14 + 180}
+          {...inputGroupProps}
+        >
           <Input
             value={selectedOption as string}
             // eslint-disable-next-line @typescript-eslint/no-empty-function -- controlled input requires onChange
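The new optional `inputGroupProps` lets a caller override the trigger's computed width (or any other InputGroup prop) instead of relying on the character-count heuristic. FineTuneButton.tsx later in this diff passes a fixed width:

```tsx
<InputDropdown
  options={SUPPORTED_BASE_MODELS}
  selectedOption={selectedBaseModel}
  onSelect={(option) => setSelectedBaseModel(option)}
  inputGroupProps={{ w: 48 }} // overrides the length-based default width
/>
```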
OutputsTable.tsx:

@@ -77,6 +77,7 @@ export default function OutputsTable({ experimentId }: { experimentId: string |
             {...sharedProps}
             borderBottomLeftRadius={isFirst ? 8 : 0}
             borderBottomRightRadius={isLast ? 8 : 0}
+            boxShadow="5px 5px 15px 1px rgba(0, 0, 0, 0.1);"
           >
             <VariantStats variant={variant} />
           </GridItem>
Paginator.tsx:

@@ -1,15 +1,19 @@
-import { HStack, IconButton, Text, Select, type StackProps, Icon } from "@chakra-ui/react";
+import {
+  HStack,
+  IconButton,
+  Text,
+  Select,
+  type StackProps,
+  Icon,
+  useBreakpointValue,
+} from "@chakra-ui/react";
 import React, { useCallback } from "react";
 import { FiChevronsLeft, FiChevronsRight, FiChevronLeft, FiChevronRight } from "react-icons/fi";
 import { usePageParams } from "~/utils/hooks";

 const pageSizeOptions = [10, 25, 50, 100];

-const Paginator = ({
-  count,
-  condense,
-  ...props
-}: { count: number; condense?: boolean } & StackProps) => {
+const Paginator = ({ count, ...props }: { count: number; condense?: boolean } & StackProps) => {
   const { page, pageSize, setPageParams } = usePageParams();

   const lastPage = Math.ceil(count / pageSize);
@@ -37,6 +41,9 @@ const Paginator = ({
   const goToLastPage = () => setPageParams({ page: lastPage }, "replace");
   const goToFirstPage = () => setPageParams({ page: 1 }, "replace");

+  const isMobile = useBreakpointValue({ base: true, md: false });
+  const condense = isMobile || props.condense;
+
   if (count === 0) return null;

   return (
Deleted file (DatasetCard components, 112 lines):

@@ -1,112 +0,0 @@
import {
  HStack,
  Icon,
  VStack,
  Text,
  Divider,
  Spinner,
  AspectRatio,
  SkeletonText,
} from "@chakra-ui/react";
import { RiDatabase2Line } from "react-icons/ri";
import { formatTimePast } from "~/utils/dayjs";
import Link from "next/link";
import { useRouter } from "next/router";
import { BsPlusSquare } from "react-icons/bs";
import { api } from "~/utils/api";
import { useHandledAsyncCallback } from "~/utils/hooks";
import { useAppStore } from "~/state/store";

type DatasetData = {
  name: string;
  numEntries: number;
  id: string;
  createdAt: Date;
  updatedAt: Date;
};

export const DatasetCard = ({ dataset }: { dataset: DatasetData }) => {
  return (
    <AspectRatio ratio={1.2} w="full">
      <VStack
        as={Link}
        href={{ pathname: "/data/[id]", query: { id: dataset.id } }}
        bg="gray.50"
        _hover={{ bg: "gray.100" }}
        transition="background 0.2s"
        cursor="pointer"
        borderColor="gray.200"
        borderWidth={1}
        p={4}
        justify="space-between"
      >
        <HStack w="full" color="gray.700" justify="center">
          <Icon as={RiDatabase2Line} boxSize={4} />
          <Text fontWeight="bold">{dataset.name}</Text>
        </HStack>
        <HStack h="full" spacing={4} flex={1} align="center">
          <CountLabel label="Rows" count={dataset.numEntries} />
        </HStack>
        <HStack w="full" color="gray.500" fontSize="xs" textAlign="center">
          <Text flex={1}>Created {formatTimePast(dataset.createdAt)}</Text>
          <Divider h={4} orientation="vertical" />
          <Text flex={1}>Updated {formatTimePast(dataset.updatedAt)}</Text>
        </HStack>
      </VStack>
    </AspectRatio>
  );
};

const CountLabel = ({ label, count }: { label: string; count: number }) => {
  return (
    <VStack alignItems="center" flex={1}>
      <Text color="gray.500" fontWeight="bold">
        {label}
      </Text>
      <Text fontSize="sm" color="gray.500">
        {count}
      </Text>
    </VStack>
  );
};

export const NewDatasetCard = () => {
  const router = useRouter();
  const selectedProjectId = useAppStore((s) => s.selectedProjectId);
  const createMutation = api.datasets.create.useMutation();
  const [createDataset, isLoading] = useHandledAsyncCallback(async () => {
    const newDataset = await createMutation.mutateAsync({ projectId: selectedProjectId ?? "" });
    await router.push({ pathname: "/data/[id]", query: { id: newDataset.id } });
  }, [createMutation, router, selectedProjectId]);

  return (
    <AspectRatio ratio={1.2} w="full">
      <VStack
        align="center"
        justify="center"
        _hover={{ cursor: "pointer", bg: "gray.50" }}
        transition="background 0.2s"
        cursor="pointer"
        borderColor="gray.200"
        borderWidth={1}
        p={4}
        onClick={createDataset}
      >
        <Icon as={isLoading ? Spinner : BsPlusSquare} boxSize={8} />
        <Text display={{ base: "none", md: "block" }} ml={2}>
          New Dataset
        </Text>
      </VStack>
    </AspectRatio>
  );
};

export const DatasetCardSkeleton = () => (
  <AspectRatio ratio={1.2} w="full">
    <VStack align="center" borderColor="gray.200" borderWidth={1} p={4} bg="gray.50">
      <SkeletonText noOfLines={1} w="80%" />
      <SkeletonText noOfLines={2} w="60%" />
      <SkeletonText noOfLines={1} w="80%" />
    </VStack>
  </AspectRatio>
);
Deleted file (DatasetEntriesPaginator, 16 lines):

@@ -1,16 +0,0 @@
import { type StackProps } from "@chakra-ui/react";

import { useDatasetEntries } from "~/utils/hooks";
import Paginator from "../Paginator";

const DatasetEntriesPaginator = (props: StackProps) => {
  const { data } = useDatasetEntries();

  if (!data) return null;

  const { count } = data;

  return <Paginator count={count} {...props} />;
};

export default DatasetEntriesPaginator;
Deleted file (DatasetEntriesTable, 31 lines):

@@ -1,31 +0,0 @@
import { type StackProps, VStack, Table, Th, Tr, Thead, Tbody, Text } from "@chakra-ui/react";
import { useDatasetEntries } from "~/utils/hooks";
import TableRow from "./TableRow";
import DatasetEntriesPaginator from "./DatasetEntriesPaginator";

const DatasetEntriesTable = (props: StackProps) => {
  const { data } = useDatasetEntries();

  return (
    <VStack justifyContent="space-between" {...props}>
      <Table variant="simple" sx={{ "table-layout": "fixed", width: "full" }}>
        <Thead>
          <Tr>
            <Th>Input</Th>
            <Th>Output</Th>
          </Tr>
        </Thead>
        <Tbody>{data?.entries.map((entry) => <TableRow key={entry.id} entry={entry} />)}</Tbody>
      </Table>
      {(!data || data.entries.length) === 0 ? (
        <Text alignSelf="flex-start" pl={6} color="gray.500">
          No entries found
        </Text>
      ) : (
        <DatasetEntriesPaginator />
      )}
    </VStack>
  );
};

export default DatasetEntriesTable;
Deleted file (DatasetHeaderButtons, 26 lines):

@@ -1,26 +0,0 @@
import { Button, HStack, useDisclosure } from "@chakra-ui/react";
import { BiImport } from "react-icons/bi";
import { BsStars } from "react-icons/bs";

import { GenerateDataModal } from "./GenerateDataModal";

export const DatasetHeaderButtons = () => {
  const generateModalDisclosure = useDisclosure();

  return (
    <>
      <HStack>
        <Button leftIcon={<BiImport />} colorScheme="blue" variant="ghost">
          Import Data
        </Button>
        <Button leftIcon={<BsStars />} colorScheme="blue" onClick={generateModalDisclosure.onOpen}>
          Generate Data
        </Button>
      </HStack>
      <GenerateDataModal
        isOpen={generateModalDisclosure.isOpen}
        onClose={generateModalDisclosure.onClose}
      />
    </>
  );
};
Deleted file (GenerateDataModal, 128 lines):

@@ -1,128 +0,0 @@
import {
  Modal,
  ModalBody,
  ModalCloseButton,
  ModalContent,
  ModalHeader,
  ModalOverlay,
  ModalFooter,
  Text,
  HStack,
  VStack,
  Icon,
  NumberInput,
  NumberInputField,
  NumberInputStepper,
  NumberIncrementStepper,
  NumberDecrementStepper,
  Button,
} from "@chakra-ui/react";
import { BsStars } from "react-icons/bs";
import { useState } from "react";
import { useDataset, useHandledAsyncCallback } from "~/utils/hooks";
import { api } from "~/utils/api";
import AutoResizeTextArea from "~/components/AutoResizeTextArea";

export const GenerateDataModal = ({
  isOpen,
  onClose,
}: {
  isOpen: boolean;
  onClose: () => void;
}) => {
  const utils = api.useContext();

  const datasetId = useDataset().data?.id;

  const [numToGenerate, setNumToGenerate] = useState<number>(20);
  const [inputDescription, setInputDescription] = useState<string>(
    "Each input should contain an email body. Half of the emails should contain event details, and the other half should not.",
  );
  const [outputDescription, setOutputDescription] = useState<string>(
    `Each output should contain "true" or "false", where "true" indicates that the email contains event details.`,
  );

  const generateEntriesMutation = api.datasetEntries.autogenerateEntries.useMutation();

  const [generateEntries, generateEntriesInProgress] = useHandledAsyncCallback(async () => {
    if (!inputDescription || !outputDescription || !numToGenerate || !datasetId) return;
    await generateEntriesMutation.mutateAsync({
      datasetId,
      inputDescription,
      outputDescription,
      numToGenerate,
    });
    await utils.datasetEntries.list.invalidate();
    onClose();
  }, [
    generateEntriesMutation,
    onClose,
    inputDescription,
    outputDescription,
    numToGenerate,
    datasetId,
  ]);

  return (
    <Modal isOpen={isOpen} onClose={onClose} size={{ base: "xl", sm: "2xl", md: "3xl" }}>
      <ModalOverlay />
      <ModalContent w={1200}>
        <ModalHeader>
          <HStack>
            <Icon as={BsStars} />
            <Text>Generate Data</Text>
          </HStack>
        </ModalHeader>
        <ModalCloseButton />
        <ModalBody maxW="unset">
          <VStack w="full" spacing={8} padding={8} alignItems="flex-start">
            <VStack alignItems="flex-start" spacing={2}>
              <Text fontWeight="bold">Number of Rows:</Text>
              <NumberInput
                step={5}
                defaultValue={15}
                min={0}
                max={100}
                onChange={(valueString) => setNumToGenerate(parseInt(valueString) || 0)}
                value={numToGenerate}
                w="24"
              >
                <NumberInputField />
                <NumberInputStepper>
                  <NumberIncrementStepper />
                  <NumberDecrementStepper />
                </NumberInputStepper>
              </NumberInput>
            </VStack>
            <VStack alignItems="flex-start" w="full" spacing={2}>
              <Text fontWeight="bold">Input Description:</Text>
              <AutoResizeTextArea
                value={inputDescription}
                onChange={(e) => setInputDescription(e.target.value)}
                placeholder="Each input should contain..."
              />
            </VStack>
            <VStack alignItems="flex-start" w="full" spacing={2}>
              <Text fontWeight="bold">Output Description (optional):</Text>
              <AutoResizeTextArea
                value={outputDescription}
                onChange={(e) => setOutputDescription(e.target.value)}
                placeholder="The output should contain..."
              />
            </VStack>
          </VStack>
        </ModalBody>
        <ModalFooter>
          <Button
            colorScheme="blue"
            isLoading={generateEntriesInProgress}
            isDisabled={!numToGenerate || !inputDescription || !outputDescription}
            onClick={generateEntries}
          >
            Generate
          </Button>
        </ModalFooter>
      </ModalContent>
    </Modal>
  );
};
Deleted file (dataset TableRow, 13 lines):

@@ -1,13 +0,0 @@
import { Td, Tr } from "@chakra-ui/react";
import { type DatasetEntry } from "@prisma/client";

const TableRow = ({ entry }: { entry: DatasetEntry }) => {
  return (
    <Tr key={entry.id}>
      <Td>{entry.input}</Td>
      <Td>{entry.output}</Td>
    </Tr>
  );
};

export default TableRow;
app/src/components/fineTunes/FineTunesTable.tsx (new file, 65 lines):

@@ -0,0 +1,65 @@
import { Card, Table, Thead, Tr, Th, Tbody, Td, VStack, Icon, Text } from "@chakra-ui/react";
import { FaTable } from "react-icons/fa";
import { type FineTuneStatus } from "@prisma/client";

import dayjs from "~/utils/dayjs";
import { useFineTunes } from "~/utils/hooks";

const FineTunesTable = ({}) => {
  const { data } = useFineTunes();

  const fineTunes = data?.fineTunes || [];

  return (
    <Card width="100%" overflowX="auto">
      {fineTunes.length ? (
        <Table>
          <Thead>
            <Tr>
              <Th>ID</Th>
              <Th>Created At</Th>
              <Th>Base Model</Th>
              <Th>Dataset Size</Th>
              <Th>Status</Th>
            </Tr>
          </Thead>
          <Tbody>
            {fineTunes.map((fineTune) => {
              return (
                <Tr key={fineTune.id}>
                  <Td>{fineTune.slug}</Td>
                  <Td>{dayjs(fineTune.createdAt).format("MMMM D h:mm A")}</Td>
                  <Td>{fineTune.baseModel}</Td>
                  <Td>{fineTune.dataset._count.datasetEntries}</Td>
                  <Td fontSize="sm" fontWeight="bold">
                    <Text color={getStatusColor(fineTune.status)}>{fineTune.status}</Text>
                  </Td>
                </Tr>
              );
            })}
          </Tbody>
        </Table>
      ) : (
        <VStack py={8}>
          <Icon as={FaTable} boxSize={16} color="gray.300" />
          <Text color="gray.400" fontSize="lg" fontWeight="bold">
            No Fine Tunes Found
          </Text>
        </VStack>
      )}
    </Card>
  );
};

export default FineTunesTable;

const getStatusColor = (status: FineTuneStatus) => {
  switch (status) {
    case "DEPLOYED":
      return "green.500";
    case "ERROR":
      return "red.500";
    default:
      return "yellow.500";
  }
};
@@ -15,12 +15,14 @@ import Head from "next/head";
|
||||
import Link from "next/link";
|
||||
import { BsGearFill, BsGithub, BsPersonCircle } from "react-icons/bs";
|
||||
import { IoStatsChartOutline } from "react-icons/io5";
|
||||
import { RiHome3Line, RiDatabase2Line, RiFlaskLine } from "react-icons/ri";
|
||||
import { RiHome3Line, RiFlaskLine } from "react-icons/ri";
|
||||
import { FaRobot } from "react-icons/fa";
|
||||
import { signIn, useSession } from "next-auth/react";
|
||||
import { env } from "~/env.mjs";
|
||||
import ProjectMenu from "./ProjectMenu";
|
||||
import NavSidebarOption from "./NavSidebarOption";
|
||||
import IconLink from "./IconLink";
|
||||
import { BetaModal } from "./BetaModal";
|
||||
import { useAppStore } from "~/state/store";
|
||||
|
||||
const Divider = () => <Box h="1px" bgColor="gray.300" w="full" />;
|
||||
|
||||
@@ -71,21 +73,10 @@ const NavSidebar = () => {
|
||||
<ProjectMenu />
|
||||
<Divider />
|
||||
|
||||
{env.NEXT_PUBLIC_FF_SHOW_LOGGED_CALLS && (
|
||||
<>
|
||||
<IconLink icon={RiHome3Line} label="Dashboard" href="/dashboard" beta />
|
||||
<IconLink
|
||||
icon={IoStatsChartOutline}
|
||||
label="Request Logs"
|
||||
href="/request-logs"
|
||||
beta
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
<IconLink icon={RiHome3Line} label="Dashboard" href="/dashboard" beta />
|
||||
<IconLink icon={IoStatsChartOutline} label="Request Logs" href="/request-logs" beta />
|
||||
<IconLink icon={FaRobot} label="Fine Tunes" href="/fine-tunes" beta />
|
||||
<IconLink icon={RiFlaskLine} label="Experiments" href="/experiments" />
|
||||
{env.NEXT_PUBLIC_SHOW_DATA && (
|
||||
<IconLink icon={RiDatabase2Line} label="Data" href="/data" />
|
||||
)}
|
||||
<VStack w="full" alignItems="flex-start" spacing={0} pt={8}>
|
||||
<Text
|
||||
pl={2}
|
||||
@@ -105,7 +96,7 @@ const NavSidebar = () => {
|
||||
<NavSidebarOption>
|
||||
<HStack
|
||||
w="full"
|
||||
p={4}
|
||||
p={{ base: 2, md: 4 }}
|
||||
as={ChakraLink}
|
||||
justifyContent="start"
|
||||
onClick={() => {
|
||||
@@ -141,10 +132,12 @@ export default function AppShell({
|
||||
children,
|
||||
title,
|
||||
requireAuth,
|
||||
requireBeta,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
title?: string;
|
||||
requireAuth?: boolean;
|
||||
requireBeta?: boolean;
|
||||
}) {
|
||||
const [vh, setVh] = useState("100vh"); // Default height to prevent flicker on initial render
|
||||
|
||||
@@ -174,15 +167,21 @@ export default function AppShell({
|
||||
}
|
||||
}, [requireAuth, user, authLoading]);
|
||||
|
||||
const flags = useAppStore((s) => s.featureFlags.featureFlags);
|
||||
const flagsLoaded = useAppStore((s) => s.featureFlags.flagsLoaded);
|
||||
|
||||
return (
|
||||
<Flex h={vh} w="100vw">
|
||||
<Head>
|
||||
<title>{title ? `${title} | OpenPipe` : "OpenPipe"}</title>
|
||||
</Head>
|
||||
<NavSidebar />
|
||||
<Box h="100%" flex={1} overflowY="auto" bgColor="gray.50">
|
||||
{children}
|
||||
</Box>
|
||||
</Flex>
|
||||
<>
|
||||
<Flex h={vh} w="100vw">
|
||||
<Head>
|
||||
<title>{title ? `${title} | OpenPipe` : "OpenPipe"}</title>
|
||||
</Head>
|
||||
<NavSidebar />
|
||||
<Box h="100%" flex={1} overflowY="auto" bgColor="gray.50">
|
||||
{children}
|
||||
</Box>
|
||||
</Flex>
|
||||
{requireBeta && flagsLoaded && !flags.betaAccess && <BetaModal />}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
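With this change a page opts into the beta gate by passing the new `requireBeta` flag; when the feature flags have loaded and the user lacks `betaAccess`, AppShell renders BetaModal over the page. A hypothetical page using it (the page component itself is an illustration, not code from this diff):

```tsx
// Illustrative only: a page guarded by the new requireBeta flag.
export default function FineTunesPage() {
  return (
    <AppShell title="Fine Tunes" requireAuth requireBeta>
      <FineTunesTable />
    </AppShell>
  );
}
```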
app/src/components/nav/BetaModal.tsx (new file, 67 lines):

@@ -0,0 +1,67 @@
import {
  Button,
  Modal,
  ModalBody,
  ModalContent,
  ModalFooter,
  ModalHeader,
  ModalOverlay,
  VStack,
  Text,
  HStack,
  Icon,
  Link,
} from "@chakra-ui/react";
import { BsStars } from "react-icons/bs";
import { useRouter } from "next/router";
import { useSession } from "next-auth/react";

export const BetaModal = () => {
  const router = useRouter();
  const session = useSession();

  const email = session.data?.user.email ?? "";

  return (
    <Modal
      isOpen
      onClose={router.back}
      closeOnOverlayClick={false}
      size={{ base: "xl", md: "2xl" }}
    >
      <ModalOverlay />
      <ModalContent w={1200}>
        <ModalHeader>
          <HStack>
            <Icon as={BsStars} />
            <Text>Beta-Only Feature</Text>
          </HStack>
        </ModalHeader>
        <ModalBody maxW="unset">
          <VStack spacing={8} py={4} alignItems="flex-start">
            <Text fontSize="md">
              This feature is currently in beta. To receive early access to beta-only features, join
              the waitlist. You'll receive an email at <b>{email}</b> when you're approved.
            </Text>
          </VStack>
        </ModalBody>
        <ModalFooter>
          <HStack spacing={4}>
            <Button
              as={Link}
              textDecoration="none !important"
              colorScheme="orange"
              target="_blank"
              href={`https://ax3nafkw0jp.typeform.com/to/ZNpYqvAc#email=${email}`}
            >
              Join Waitlist
            </Button>
            <Button colorScheme="blue" onClick={router.back}>
              Done
            </Button>
          </HStack>
        </ModalFooter>
      </ModalContent>
    </Modal>
  );
};
ProjectMenu.tsx:

@@ -14,6 +14,7 @@ import {
   Link as ChakraLink,
   Image,
   Box,
+  Portal,
 } from "@chakra-ui/react";
 import { useEffect } from "react";
 import Link from "next/link";
@@ -109,64 +110,66 @@ export default function ProjectMenu() {
           </HStack>
         </NavSidebarOption>
       </PopoverTrigger>
+      <Portal>
         <PopoverContent
           _focusVisible={{ outline: "unset" }}
           w={220}
           ml={{ base: 2, md: 0 }}
           boxShadow="0 0 40px 4px rgba(0, 0, 0, 0.1);"
           fontSize="sm"
         >
           <VStack alignItems="flex-start" spacing={1} py={1}>
             <Text px={3} py={2}>
               {user?.user.email}
             </Text>
             <Divider />
             <Text alignSelf="flex-start" fontWeight="bold" px={3} pt={2}>
               Your Projects
             </Text>
             <VStack spacing={0} w="full" px={1}>
               {projects?.map((proj) => (
                 <ProjectOption
                   key={proj.id}
                   proj={proj}
                   isActive={proj.id === selectedProjectId}
                   onClose={popover.onClose}
                 />
               ))}
               <HStack
                 as={Button}
                 variant="ghost"
                 colorScheme="blue"
                 color="blue.400"
                 fontSize="sm"
                 justifyContent="flex-start"
                 onClick={createProject}
                 w="full"
                 borderRadius={4}
                 spacing={0}
               >
                 <Text>Add project</Text>
                 <Icon as={isLoading ? Spinner : BsPlus} boxSize={4} strokeWidth={0.5} />
               </HStack>
             </VStack>
             <Divider />
             <VStack w="full" px={1}>
               <ChakraLink
                 onClick={() => {
                   signOut().catch(console.error);
                 }}
                 _hover={{ bgColor: "gray.200", textDecoration: "none" }}
                 w="full"
                 py={2}
                 px={2}
                 borderRadius={4}
               >
                 <Text>Sign out</Text>
               </ChakraLink>
             </VStack>
           </VStack>
         </PopoverContent>
+      </Portal>
     </Popover>
   </VStack>
 );
UserMenu.tsx:

@@ -23,50 +23,48 @@ export default function UserMenu({ user, ...rest }: { user: Session } & StackProps) {
   );

   return (
-    <>
-      <Popover placement="right">
+    <Popover placement="right">
       <PopoverTrigger>
         <NavSidebarOption>
           <HStack
             // Weird values to make mobile look right; can clean up when we make the sidebar disappear on mobile
             py={2}
             px={1}
             spacing={3}
             {...rest}
           >
             {profileImage}
             <VStack spacing={0} align="start" flex={1} flexShrink={1}>
               <Text fontWeight="bold" fontSize="sm">
                 {user.user.name}
               </Text>
               <Text color="gray.500" fontSize="xs">
                 {/* {user.user.email} */}
               </Text>
             </VStack>
             <Icon as={BsChevronRight} boxSize={4} color="gray.500" />
           </HStack>
         </NavSidebarOption>
       </PopoverTrigger>
       <PopoverContent _focusVisible={{ outline: "unset" }} ml={-1} minW={48} w="full">
         <VStack align="stretch" spacing={0}>
           {/* sign out */}
           <HStack
             as={Link}
             onClick={() => {
               signOut().catch(console.error);
             }}
             px={4}
             py={2}
             spacing={4}
             color="gray.500"
             fontSize="sm"
           >
             <Icon as={BsBoxArrowRight} boxSize={6} />
             <Text>Sign out</Text>
           </HStack>
         </VStack>
       </PopoverContent>
-      </Popover>
-    </>
+    </Popover>
   );
 }
ActionButton.tsx:

@@ -21,7 +21,7 @@ const ActionButton = ({
     >
       <HStack spacing={1}>
         {icon && <Icon as={icon} />}
-        <Text>{label}</Text>
+        <Text display={{ base: "none", md: "flex" }}>{label}</Text>
       </HStack>
     </Button>
   );
app/src/components/requestLogs/ColumnVisiblityDropdown.tsx (new file, 117 lines):

@@ -0,0 +1,117 @@
import {
  Icon,
  Popover,
  PopoverTrigger,
  PopoverContent,
  VStack,
  HStack,
  Button,
  Text,
  useDisclosure,
  Box,
} from "@chakra-ui/react";
import { BiCheck } from "react-icons/bi";
import { BsToggles } from "react-icons/bs";
import { useMemo } from "react";

import { useIsClientRehydrated, useTagNames } from "~/utils/hooks";
import { useAppStore } from "~/state/store";
import { StaticColumnKeys } from "~/state/columnVisiblitySlice";
import ActionButton from "./ActionButton";

const ColumnVisiblityDropdown = () => {
  const tagNames = useTagNames().data;

  const visibleColumns = useAppStore((s) => s.columnVisibility.visibleColumns);
  const toggleColumnVisibility = useAppStore((s) => s.columnVisibility.toggleColumnVisibility);
  const totalColumns = Object.keys(StaticColumnKeys).length + (tagNames?.length ?? 0);

  const popover = useDisclosure();

  const columnVisiblityOptions = useMemo(() => {
    const options: { label: string; key: string }[] = [
      {
        label: "Sent At",
        key: StaticColumnKeys.SENT_AT,
      },
      {
        label: "Model",
        key: StaticColumnKeys.MODEL,
      },
      {
        label: "Duration",
        key: StaticColumnKeys.DURATION,
      },
      {
        label: "Input Tokens",
        key: StaticColumnKeys.INPUT_TOKENS,
      },
      {
        label: "Output Tokens",
        key: StaticColumnKeys.OUTPUT_TOKENS,
      },
      {
        label: "Status Code",
        key: StaticColumnKeys.STATUS_CODE,
      },
    ];
    for (const tagName of tagNames ?? []) {
      options.push({
        label: tagName,
        key: tagName,
      });
    }
    return options;
  }, [tagNames]);

  const isClientRehydrated = useIsClientRehydrated();
  if (!isClientRehydrated) return null;

  return (
    <Popover
      placement="bottom-start"
      isOpen={popover.isOpen}
      onOpen={popover.onOpen}
      onClose={popover.onClose}
    >
      <PopoverTrigger>
        <Box>
          <ActionButton
            label={`Columns (${visibleColumns.size}/${totalColumns})`}
            icon={BsToggles}
          />
        </Box>
      </PopoverTrigger>
      <PopoverContent boxShadow="0 0 40px 4px rgba(0, 0, 0, 0.1);" minW={0} w="auto">
        <VStack spacing={0} maxH={400} overflowY="auto">
          {columnVisiblityOptions?.map((option, index) => (
            <HStack
              key={index}
              as={Button}
              onClick={() => toggleColumnVisibility(option.key)}
              w="full"
              minH={10}
              variant="ghost"
              justifyContent="space-between"
              fontWeight="semibold"
              borderRadius={0}
              colorScheme="blue"
              color="black"
              fontSize="sm"
              borderBottomWidth={1}
            >
              <Text mr={16}>{option.label}</Text>
              <Box w={5}>
                {visibleColumns.has(option.key) && (
                  <Icon as={BiCheck} color="blue.500" boxSize={5} />
                )}
              </Box>
            </HStack>
          ))}
        </VStack>
      </PopoverContent>
    </Popover>
  );
};

export default ColumnVisiblityDropdown;
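The dropdown reads `visibleColumns` and `toggleColumnVisibility` from a `columnVisibility` store slice whose source file is not included in this compare view. A plausible shape for that slice, offered purely as an assumption:

```ts
// Assumed shape of ~/state/columnVisiblitySlice; the real enum values and
// toggle implementation are not shown in this diff and may differ.
export enum StaticColumnKeys {
  SENT_AT = "sentAt",
  MODEL = "model",
  DURATION = "duration",
  INPUT_TOKENS = "inputTokens",
  OUTPUT_TOKENS = "outputTokens",
  STATUS_CODE = "statusCode",
}

export type ColumnVisibilitySlice = {
  visibleColumns: Set<string>;
  toggleColumnVisibility: (key: string) => void;
};
```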
app/src/components/requestLogs/ExportButton.tsx (new file, 210 lines):

@@ -0,0 +1,210 @@
import { useState, useEffect } from "react";
import {
  Modal,
  ModalOverlay,
  ModalContent,
  ModalHeader,
  ModalCloseButton,
  ModalBody,
  ModalFooter,
  HStack,
  VStack,
  Icon,
  Text,
  Button,
  Checkbox,
  NumberInput,
  NumberInputField,
  NumberInputStepper,
  NumberIncrementStepper,
  NumberDecrementStepper,
  Collapse,
  Flex,
  useDisclosure,
  type UseDisclosureReturn,
} from "@chakra-ui/react";
import { BiExport } from "react-icons/bi";

import { useHandledAsyncCallback } from "~/utils/hooks";
import { api } from "~/utils/api";
import { useAppStore } from "~/state/store";
import ActionButton from "./ActionButton";
import InputDropdown from "../InputDropdown";
import { FiChevronUp, FiChevronDown } from "react-icons/fi";
import InfoCircle from "../InfoCircle";

const SUPPORTED_EXPORT_FORMATS = ["alpaca-finetune", "openai-fine-tune", "unformatted"];

const ExportButton = () => {
  const selectedLogIds = useAppStore((s) => s.selectedLogs.selectedLogIds);

  const disclosure = useDisclosure();

  return (
    <>
      <ActionButton
        onClick={disclosure.onOpen}
        label="Export"
        icon={BiExport}
        isDisabled={selectedLogIds.size === 0}
      />
      <ExportLogsModal disclosure={disclosure} />
    </>
  );
};

export default ExportButton;

const ExportLogsModal = ({ disclosure }: { disclosure: UseDisclosureReturn }) => {
  const selectedProjectId = useAppStore((s) => s.selectedProjectId);
  const selectedLogIds = useAppStore((s) => s.selectedLogs.selectedLogIds);
  const clearSelectedLogIds = useAppStore((s) => s.selectedLogs.clearSelectedLogIds);

  const [selectedExportFormat, setSelectedExportFormat] = useState(SUPPORTED_EXPORT_FORMATS[0]);
  const [testingSplit, setTestingSplit] = useState(10);
  const [removeDuplicates, setRemoveDuplicates] = useState(true);
  const [showAdvancedOptions, setShowAdvancedOptions] = useState(false);

  useEffect(() => {
    if (disclosure.isOpen) {
      setSelectedExportFormat(SUPPORTED_EXPORT_FORMATS[0]);
      setTestingSplit(10);
      setRemoveDuplicates(true);
    }
  }, [disclosure.isOpen]);

  const exportLogsMutation = api.loggedCalls.export.useMutation();

  const [exportLogs, exportInProgress] = useHandledAsyncCallback(async () => {
    if (!selectedProjectId || !selectedLogIds.size || !testingSplit || !selectedExportFormat)
      return;
    const response = await exportLogsMutation.mutateAsync({
      projectId: selectedProjectId,
      selectedLogIds: Array.from(selectedLogIds),
      testingSplit,
      selectedExportFormat,
      removeDuplicates,
    });

    const dataUrl = `data:application/pdf;base64,${response}`;
    const blob = await fetch(dataUrl).then((res) => res.blob());
    const url = URL.createObjectURL(blob);
    const a = document.createElement("a");

    a.href = url;
    a.download = `data.zip`;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);

    disclosure.onClose();
    clearSelectedLogIds();
  }, [
    exportLogsMutation,
    selectedProjectId,
    selectedLogIds,
    testingSplit,
    selectedExportFormat,
    removeDuplicates,
  ]);

  return (
    <Modal size={{ base: "xl", md: "2xl" }} {...disclosure}>
      <ModalOverlay />
      <ModalContent w={1200}>
        <ModalHeader>
          <HStack>
            <Icon as={BiExport} />
            <Text>Export Logs</Text>
          </HStack>
        </ModalHeader>
        <ModalCloseButton />
        <ModalBody maxW="unset">
          <VStack w="full" spacing={8} pt={4} alignItems="flex-start">
            <Text>
              We'll export the <b>{selectedLogIds.size}</b> logs you have selected in the format of
              your choice.
            </Text>
            <VStack alignItems="flex-start" spacing={4}>
              <Flex
                flexDir={{ base: "column", md: "row" }}
                alignItems={{ base: "flex-start", md: "center" }}
              >
                <HStack w={48} alignItems="center" spacing={1}>
                  <Text fontWeight="bold">Format:</Text>
                  <InfoCircle tooltipText="Format logs for fine tuning or export them without formatting." />
                </HStack>
                <InputDropdown
                  options={SUPPORTED_EXPORT_FORMATS}
                  selectedOption={selectedExportFormat}
                  onSelect={(option) => setSelectedExportFormat(option)}
                  inputGroupProps={{ w: 48 }}
                />
              </Flex>
              <Flex
                flexDir={{ base: "column", md: "row" }}
                alignItems={{ base: "flex-start", md: "center" }}
              >
                <HStack w={48} alignItems="center" spacing={1}>
                  <Text fontWeight="bold">Testing Split:</Text>
                  <InfoCircle tooltipText="The percent of your logs that will be reserved for testing and saved in another file. Logs are split randomly." />
                </HStack>
                <HStack>
                  <NumberInput
                    defaultValue={10}
                    onChange={(_, num) => setTestingSplit(num)}
                    min={1}
                    max={100}
                    w={48}
                  >
                    <NumberInputField />
                    <NumberInputStepper>
                      <NumberIncrementStepper />
                      <NumberDecrementStepper />
                    </NumberInputStepper>
                  </NumberInput>
                </HStack>
              </Flex>
            </VStack>
            <VStack alignItems="flex-start" spacing={0}>
              <Button
                variant="unstyled"
                color="blue.600"
                onClick={() => setShowAdvancedOptions(!showAdvancedOptions)}
              >
                <HStack>
                  <Text>Advanced Options</Text>
                  <Icon as={showAdvancedOptions ? FiChevronUp : FiChevronDown} />
                </HStack>
              </Button>
              <Collapse in={showAdvancedOptions} unmountOnExit={true}>
                <VStack align="stretch" pt={4}>
                  <HStack>
                    <Checkbox
                      colorScheme="blue"
                      isChecked={removeDuplicates}
                      onChange={(e) => setRemoveDuplicates(e.target.checked)}
                    >
                      <Text>Remove duplicates</Text>
                    </Checkbox>
                    <InfoCircle tooltipText="To avoid overfitting and speed up training, automatically deduplicate logs with matching input and output." />
                  </HStack>
                </VStack>
              </Collapse>
            </VStack>
          </VStack>
        </ModalBody>
        <ModalFooter>
          <HStack>
            <Button colorScheme="gray" onClick={disclosure.onClose} minW={24}>
              Cancel
            </Button>
            <Button colorScheme="blue" onClick={exportLogs} isLoading={exportInProgress} minW={24}>
              Export
            </Button>
          </HStack>
        </ModalFooter>
      </ModalContent>
    </Modal>
  );
};
app/src/components/requestLogs/FineTuneButton.tsx (new file, 161 lines):

@@ -0,0 +1,161 @@
import { useState, useEffect } from "react";
import {
  Modal,
  ModalOverlay,
  ModalContent,
  ModalHeader,
  ModalCloseButton,
  ModalBody,
  ModalFooter,
  HStack,
  VStack,
  Icon,
  Text,
  Button,
  useDisclosure,
  type UseDisclosureReturn,
  Input,
} from "@chakra-ui/react";
import { FaRobot } from "react-icons/fa";
import humanId from "human-id";
import { useRouter } from "next/router";

import { useHandledAsyncCallback } from "~/utils/hooks";
import { api } from "~/utils/api";
import { useAppStore } from "~/state/store";
import ActionButton from "./ActionButton";
import InputDropdown from "../InputDropdown";
import { FiChevronDown } from "react-icons/fi";

const SUPPORTED_BASE_MODELS = ["llama2-7b", "llama2-13b", "llama2-70b", "gpt-3.5-turbo"];

const FineTuneButton = () => {
  const selectedLogIds = useAppStore((s) => s.selectedLogs.selectedLogIds);

  const disclosure = useDisclosure();

  return (
    <>
      <ActionButton
        onClick={disclosure.onOpen}
        label="Fine Tune"
        icon={FaRobot}
        isDisabled={selectedLogIds.size === 0}
      />
      <FineTuneModal disclosure={disclosure} />
    </>
  );
};

export default FineTuneButton;

const FineTuneModal = ({ disclosure }: { disclosure: UseDisclosureReturn }) => {
  const selectedProjectId = useAppStore((s) => s.selectedProjectId);
  const selectedLogIds = useAppStore((s) => s.selectedLogs.selectedLogIds);
  const clearSelectedLogIds = useAppStore((s) => s.selectedLogs.clearSelectedLogIds);

  const [selectedBaseModel, setSelectedBaseModel] = useState(SUPPORTED_BASE_MODELS[0]);
  const [modelSlug, setModelSlug] = useState(humanId({ separator: "-", capitalize: false }));

  useEffect(() => {
    if (disclosure.isOpen) {
      setSelectedBaseModel(SUPPORTED_BASE_MODELS[0]);
      setModelSlug(humanId({ separator: "-", capitalize: false }));
    }
  }, [disclosure.isOpen]);

  const utils = api.useContext();
  const router = useRouter();

  const createFineTuneMutation = api.fineTunes.create.useMutation();

  const [createFineTune, creationInProgress] = useHandledAsyncCallback(async () => {
    if (!selectedProjectId || !modelSlug || !selectedBaseModel || !selectedLogIds.size) return;
    await createFineTuneMutation.mutateAsync({
      projectId: selectedProjectId,
      slug: modelSlug,
      baseModel: selectedBaseModel,
      selectedLogIds: Array.from(selectedLogIds),
    });

    await utils.fineTunes.list.invalidate();
    await router.push({ pathname: "/fine-tunes" });
    clearSelectedLogIds();
    disclosure.onClose();
  }, [createFineTuneMutation, selectedProjectId, selectedLogIds, modelSlug, selectedBaseModel]);

  return (
    <Modal size={{ base: "xl", md: "2xl" }} {...disclosure}>
      <ModalOverlay />
      <ModalContent w={1200}>
        <ModalHeader>
          <HStack>
            <Icon as={FaRobot} />
            <Text>Fine Tune</Text>
          </HStack>
        </ModalHeader>
        <ModalCloseButton />
        <ModalBody maxW="unset">
          <VStack w="full" spacing={8} pt={4} alignItems="flex-start">
            <Text>
              We'll train on the <b>{selectedLogIds.size}</b> logs you've selected.
            </Text>
            <VStack>
              <HStack spacing={2} w="full">
                <Text fontWeight="bold" w={36}>
                  Model ID:
                </Text>
                <Input
                  value={modelSlug}
                  onChange={(e) => setModelSlug(e.target.value)}
                  w={48}
                  placeholder="unique-id"
                  onKeyDown={(e) => {
                    // If the user types anything other than a-z, A-Z, or 0-9, replace it with -
                    if (!/[a-zA-Z0-9]/.test(e.key)) {
                      e.preventDefault();
                      setModelSlug((s) => s && `${s}-`);
                    }
                  }}
                />
              </HStack>
              <HStack spacing={2}>
                <Text fontWeight="bold" w={36}>
                  Base model:
                </Text>
                <InputDropdown
                  options={SUPPORTED_BASE_MODELS}
                  selectedOption={selectedBaseModel}
                  onSelect={(option) => setSelectedBaseModel(option)}
                  inputGroupProps={{ w: 48 }}
                />
              </HStack>
            </VStack>
            <Button variant="unstyled" color="blue.600">
              <HStack>
                <Text>Advanced Options</Text>
                <Icon as={FiChevronDown} />
              </HStack>
            </Button>
          </VStack>
        </ModalBody>
        <ModalFooter>
          <HStack>
            <Button colorScheme="gray" onClick={disclosure.onClose} minW={24}>
              Cancel
            </Button>
            <Button
              colorScheme="blue"
              onClick={createFineTune}
              isLoading={creationInProgress}
              minW={24}
              isDisabled={!modelSlug}
            >
              Start Training
            </Button>
          </HStack>
        </ModalFooter>
      </ModalContent>
    </Modal>
  );
};
LoggedCallsTable.tsx:

@@ -10,7 +10,7 @@ export default function LoggedCallsTable() {
   return (
     <Card width="100%" overflowX="auto">
       <Table>
-        <TableHeader showCheckbox />
+        <TableHeader showOptions />
         <Tbody>
           {loggedCalls?.calls?.map((loggedCall) => {
             return (
@@ -25,7 +25,7 @@ export default function LoggedCallsTable() {
                   setExpandedRow(loggedCall.id);
                 }
               }}
-              showCheckbox
+              showOptions
             />
           );
         })}
@@ -14,21 +14,19 @@ import {
  Text,
  Checkbox,
} from "@chakra-ui/react";
import dayjs from "dayjs";
import relativeTime from "dayjs/plugin/relativeTime";
import Link from "next/link";

import dayjs from "~/utils/dayjs";
import { type RouterOutputs } from "~/utils/api";
import { FormattedJson } from "./FormattedJson";
import { useAppStore } from "~/state/store";
import { useLoggedCalls, useTagNames } from "~/utils/hooks";
import { useIsClientRehydrated, useLoggedCalls, useTagNames } from "~/utils/hooks";
import { useMemo } from "react";

dayjs.extend(relativeTime);
import { StaticColumnKeys } from "~/state/columnVisiblitySlice";

type LoggedCall = RouterOutputs["loggedCalls"]["list"]["calls"][0];

export const TableHeader = ({ showCheckbox }: { showCheckbox?: boolean }) => {
export const TableHeader = ({ showOptions }: { showOptions?: boolean }) => {
  const matchingLogIds = useLoggedCalls().data?.matchingLogIds;
  const selectedLogIds = useAppStore((s) => s.selectedLogs.selectedLogIds);
  const addAll = useAppStore((s) => s.selectedLogs.addSelectedLogIds);
@@ -38,10 +36,14 @@ export const TableHeader = ({ showCheckbox }: { showCheckbox?: boolean }) => {
    return matchingLogIds.every((id) => selectedLogIds.has(id));
  }, [selectedLogIds, matchingLogIds]);
  const tagNames = useTagNames().data;
  const visibleColumns = useAppStore((s) => s.columnVisibility.visibleColumns);
  const isClientRehydrated = useIsClientRehydrated();
  if (!isClientRehydrated) return null;

  return (
    <Thead>
      <Tr>
        {showCheckbox && (
        {showOptions && (
          <Th pr={0}>
            <HStack minW={16}>
              <Checkbox
@@ -57,13 +59,19 @@
            </HStack>
          </Th>
        )}
        <Th>Sent At</Th>
        <Th>Model</Th>
        {tagNames?.map((tagName) => <Th key={tagName}>{tagName}</Th>)}
        <Th isNumeric>Duration</Th>
        <Th isNumeric>Input tokens</Th>
        <Th isNumeric>Output tokens</Th>
        <Th isNumeric>Status</Th>
        {visibleColumns.has(StaticColumnKeys.SENT_AT) && <Th>Sent At</Th>}
        {visibleColumns.has(StaticColumnKeys.MODEL) && <Th>Model</Th>}
        {tagNames
          ?.filter((tagName) => visibleColumns.has(tagName))
          .map((tagName) => (
            <Th key={tagName} textTransform={"none"}>
              {tagName}
            </Th>
          ))}
        {visibleColumns.has(StaticColumnKeys.DURATION) && <Th isNumeric>Duration</Th>}
        {visibleColumns.has(StaticColumnKeys.INPUT_TOKENS) && <Th isNumeric>Input tokens</Th>}
        {visibleColumns.has(StaticColumnKeys.OUTPUT_TOKENS) && <Th isNumeric>Output tokens</Th>}
        {visibleColumns.has(StaticColumnKeys.STATUS_CODE) && <Th isNumeric>Status</Th>}
      </Tr>
    </Thead>
  );
@@ -73,12 +81,12 @@ export const TableRow = ({
  loggedCall,
  isExpanded,
  onToggle,
  showCheckbox,
  showOptions,
}: {
  loggedCall: LoggedCall;
  isExpanded: boolean;
  onToggle: () => void;
  showCheckbox?: boolean;
  showOptions?: boolean;
}) => {
  const isError = loggedCall.modelResponse?.statusCode !== 200;
  const requestedAt = dayjs(loggedCall.requestedAt).format("MMMM D h:mm A");
@@ -88,6 +96,14 @@ export const TableRow = ({
  const toggleChecked = useAppStore((s) => s.selectedLogs.toggleSelectedLogId);

  const tagNames = useTagNames().data;
  const visibleColumns = useAppStore((s) => s.columnVisibility.visibleColumns);

  const visibleTagNames = useMemo(() => {
    return tagNames?.filter((tagName) => visibleColumns.has(tagName)) ?? [];
  }, [tagNames, visibleColumns]);

  const isClientRehydrated = useIsClientRehydrated();
  if (!isClientRehydrated) return null;

  return (
    <>
@@ -100,50 +116,64 @@ export const TableRow = ({
        }}
        fontSize="sm"
      >
        {showCheckbox && (
        {showOptions && (
          <Td>
            <Checkbox isChecked={isChecked} onChange={() => toggleChecked(loggedCall.id)} />
          </Td>
        )}
        <Td>
          <Tooltip label={fullTime} placement="top">
            <Box whiteSpace="nowrap" minW="120px">
              {requestedAt}
            </Box>
          </Tooltip>
        </Td>
        <Td>
          <HStack justifyContent="flex-start">
            <Text
              colorScheme="purple"
              color="purple.500"
              borderColor="purple.500"
              px={1}
              borderRadius={4}
              borderWidth={1}
              fontSize="xs"
              whiteSpace="nowrap"
            >
              {loggedCall.model}
            </Text>
          </HStack>
        </Td>
        {tagNames?.map((tagName) => <Td key={tagName}>{loggedCall.tags[tagName]}</Td>)}
        <Td isNumeric>
          {loggedCall.cacheHit ? (
            <Text color="gray.500">Cached</Text>
          ) : (
            ((loggedCall.modelResponse?.durationMs ?? 0) / 1000).toFixed(2) + "s"
          )}
        </Td>
        <Td isNumeric>{loggedCall.modelResponse?.inputTokens}</Td>
        <Td isNumeric>{loggedCall.modelResponse?.outputTokens}</Td>
        <Td sx={{ color: isError ? "red.500" : "green.500", fontWeight: "semibold" }} isNumeric>
          {loggedCall.modelResponse?.statusCode ?? "No response"}
        </Td>
        {visibleColumns.has(StaticColumnKeys.SENT_AT) && (
          <Td>
            <Tooltip label={fullTime} placement="top">
              <Box whiteSpace="nowrap" minW="120px">
                {requestedAt}
              </Box>
            </Tooltip>
          </Td>
        )}
        {visibleColumns.has(StaticColumnKeys.MODEL) && (
          <Td>
            <HStack justifyContent="flex-start">
              <Text
                colorScheme="purple"
                color="purple.500"
                borderColor="purple.500"
                px={1}
                borderRadius={4}
                borderWidth={1}
                fontSize="xs"
                whiteSpace="nowrap"
              >
                {loggedCall.model}
              </Text>
            </HStack>
          </Td>
        )}
        {visibleTagNames.map((tagName) => (
          <Td key={tagName}>{loggedCall.tags[tagName]}</Td>
        ))}
        {visibleColumns.has(StaticColumnKeys.DURATION) && (
          <Td isNumeric>
            {loggedCall.cacheHit ? (
              <Text color="gray.500">Cached</Text>
            ) : (
              ((loggedCall.modelResponse?.durationMs ?? 0) / 1000).toFixed(2) + "s"
            )}
          </Td>
        )}
        {visibleColumns.has(StaticColumnKeys.INPUT_TOKENS) && (
          <Td isNumeric>{loggedCall.modelResponse?.inputTokens}</Td>
        )}
        {visibleColumns.has(StaticColumnKeys.OUTPUT_TOKENS) && (
          <Td isNumeric>{loggedCall.modelResponse?.outputTokens}</Td>
        )}
        {visibleColumns.has(StaticColumnKeys.STATUS_CODE) && (
          <Td sx={{ color: isError ? "red.500" : "green.500", fontWeight: "semibold" }} isNumeric>
            {loggedCall.modelResponse?.statusCode ?? "No response"}
          </Td>
        )}
      </Tr>
      <Tr>
        <Td colSpan={8} p={0}>
        <Td colSpan={visibleColumns.size + 1} w="full" p={0}>
          <Collapse in={isExpanded} unmountOnExit={true}>
            <VStack p={4} align="stretch">
              <HStack align="stretch">
@@ -46,8 +46,6 @@ export const env = createEnv({
    NEXT_PUBLIC_SOCKET_URL: z.string().url().default("http://localhost:3318"),
    NEXT_PUBLIC_HOST: z.string().url().default("http://localhost:3000"),
    NEXT_PUBLIC_SENTRY_DSN: z.string().optional(),
    NEXT_PUBLIC_SHOW_DATA: z.string().optional(),
    NEXT_PUBLIC_FF_SHOW_LOGGED_CALLS: z.string().optional(),
  },

  /**
@@ -62,7 +60,6 @@ export const env = createEnv({
    NEXT_PUBLIC_POSTHOG_KEY: process.env.NEXT_PUBLIC_POSTHOG_KEY,
    NEXT_PUBLIC_SOCKET_URL: process.env.NEXT_PUBLIC_SOCKET_URL,
    NEXT_PUBLIC_HOST: process.env.NEXT_PUBLIC_HOST,
    NEXT_PUBLIC_SHOW_DATA: process.env.NEXT_PUBLIC_SHOW_DATA,
    GITHUB_CLIENT_ID: process.env.GITHUB_CLIENT_ID,
    GITHUB_CLIENT_SECRET: process.env.GITHUB_CLIENT_SECRET,
    REPLICATE_API_TOKEN: process.env.REPLICATE_API_TOKEN,
@@ -70,7 +67,6 @@ export const env = createEnv({
    NEXT_PUBLIC_SENTRY_DSN: process.env.NEXT_PUBLIC_SENTRY_DSN,
    SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
    OPENPIPE_API_KEY: process.env.OPENPIPE_API_KEY,
    NEXT_PUBLIC_FF_SHOW_LOGGED_CALLS: process.env.NEXT_PUBLIC_FF_SHOW_LOGGED_CALLS,
    SENDER_EMAIL: process.env.SENDER_EMAIL,
    SMTP_HOST: process.env.SMTP_HOST,
    SMTP_PORT: process.env.SMTP_PORT,
@@ -33,7 +33,7 @@ export default function Dashboard() {
  );

  return (
    <AppShell title="Dashboard" requireAuth>
    <AppShell title="Dashboard" requireAuth requireBeta>
      <VStack px={8} py={8} alignItems="flex-start" spacing={4}>
        <Text fontSize="2xl" fontWeight="bold">
          Dashboard
@@ -1,97 +0,0 @@
import {
  Box,
  Breadcrumb,
  BreadcrumbItem,
  Center,
  Flex,
  Icon,
  Input,
  VStack,
} from "@chakra-ui/react";
import Link from "next/link";

import { useRouter } from "next/router";
import { useState, useEffect } from "react";
import { RiDatabase2Line } from "react-icons/ri";
import AppShell from "~/components/nav/AppShell";
import { api } from "~/utils/api";
import { useDataset, useHandledAsyncCallback } from "~/utils/hooks";
import DatasetEntriesTable from "~/components/datasets/DatasetEntriesTable";
import { DatasetHeaderButtons } from "~/components/datasets/DatasetHeaderButtons/DatasetHeaderButtons";
import PageHeaderContainer from "~/components/nav/PageHeaderContainer";
import ProjectBreadcrumbContents from "~/components/nav/ProjectBreadcrumbContents";

export default function Dataset() {
  const router = useRouter();
  const utils = api.useContext();

  const dataset = useDataset();
  const datasetId = router.query.id as string;

  const [name, setName] = useState(dataset.data?.name || "");
  useEffect(() => {
    setName(dataset.data?.name || "");
  }, [dataset.data?.name]);

  const updateMutation = api.datasets.update.useMutation();
  const [onSaveName] = useHandledAsyncCallback(async () => {
    if (name && name !== dataset.data?.name && dataset.data?.id) {
      await updateMutation.mutateAsync({
        id: dataset.data.id,
        updates: { name: name },
      });
      await Promise.all([utils.datasets.list.invalidate(), utils.datasets.get.invalidate()]);
    }
  }, [updateMutation, dataset.data?.id, dataset.data?.name, name]);

  if (!dataset.isLoading && !dataset.data) {
    return (
      <AppShell title="Dataset not found">
        <Center h="100%">
          <div>Dataset not found 😕</div>
        </Center>
      </AppShell>
    );
  }

  return (
    <AppShell title={dataset.data?.name}>
      <VStack h="full">
        <PageHeaderContainer>
          <Breadcrumb>
            <BreadcrumbItem>
              <ProjectBreadcrumbContents projectName={dataset.data?.project?.name} />
            </BreadcrumbItem>
            <BreadcrumbItem>
              <Link href="/data">
                <Flex alignItems="center" _hover={{ textDecoration: "underline" }}>
                  <Icon as={RiDatabase2Line} boxSize={4} mr={2} /> Datasets
                </Flex>
              </Link>
            </BreadcrumbItem>
            <BreadcrumbItem isCurrentPage>
              <Input
                size="sm"
                value={name}
                onChange={(e) => setName(e.target.value)}
                onBlur={onSaveName}
                borderWidth={1}
                borderColor="transparent"
                fontSize={16}
                px={0}
                minW={{ base: 100, lg: 300 }}
                flex={1}
                _hover={{ borderColor: "gray.300" }}
                _focus={{ borderColor: "blue.500", outline: "none" }}
              />
            </BreadcrumbItem>
          </Breadcrumb>
          <DatasetHeaderButtons />
        </PageHeaderContainer>
        <Box w="full" overflowX="auto" flex={1} px={8} pt={8} pb={16}>
          {datasetId && <DatasetEntriesTable />}
        </Box>
      </VStack>
    </AppShell>
  );
}
@@ -1,49 +0,0 @@
import { SimpleGrid, Icon, Breadcrumb, BreadcrumbItem, Flex } from "@chakra-ui/react";
import AppShell from "~/components/nav/AppShell";
import { RiDatabase2Line } from "react-icons/ri";
import {
  DatasetCard,
  DatasetCardSkeleton,
  NewDatasetCard,
} from "~/components/datasets/DatasetCard";
import PageHeaderContainer from "~/components/nav/PageHeaderContainer";
import ProjectBreadcrumbContents from "~/components/nav/ProjectBreadcrumbContents";
import { useDatasets } from "~/utils/hooks";

export default function DatasetsPage() {
  const datasets = useDatasets();

  return (
    <AppShell title="Data" requireAuth>
      <PageHeaderContainer>
        <Breadcrumb>
          <BreadcrumbItem>
            <ProjectBreadcrumbContents />
          </BreadcrumbItem>
          <BreadcrumbItem minH={8}>
            <Flex alignItems="center">
              <Icon as={RiDatabase2Line} boxSize={4} mr={2} /> Datasets
            </Flex>
          </BreadcrumbItem>
        </Breadcrumb>
      </PageHeaderContainer>
      <SimpleGrid w="full" columns={{ base: 1, md: 2, lg: 3, xl: 4 }} spacing={8} py={4} px={8}>
        <NewDatasetCard />
        {datasets.data && !datasets.isLoading ? (
          datasets?.data?.map((dataset) => (
            <DatasetCard
              key={dataset.id}
              dataset={{ ...dataset, numEntries: dataset._count.datasetEntries }}
            />
          ))
        ) : (
          <>
            <DatasetCardSkeleton />
            <DatasetCardSkeleton />
            <DatasetCardSkeleton />
          </>
        )}
      </SimpleGrid>
    </AppShell>
  );
}
18
app/src/pages/fine-tunes/index.tsx
Normal file
@@ -0,0 +1,18 @@
import { Text, VStack, Divider } from "@chakra-ui/react";
import FineTunesTable from "~/components/fineTunes/FineTunesTable";

import AppShell from "~/components/nav/AppShell";

export default function FineTunes() {
  return (
    <AppShell title="Fine Tunes" requireAuth requireBeta>
      <VStack px={8} py={8} alignItems="flex-start" spacing={4} w="full">
        <Text fontSize="2xl" fontWeight="bold">
          Fine Tunes
        </Text>
        <Divider />
        <FineTunesTable />
      </VStack>
    </AppShell>
  );
}
@@ -1,5 +1,5 @@
import { useState } from "react";
import { Text, VStack, Divider, HStack } from "@chakra-ui/react";
import { Text, VStack, Divider, HStack, Box } from "@chakra-ui/react";

import AppShell from "~/components/nav/AppShell";
import LoggedCallTable from "~/components/requestLogs/LoggedCallsTable";
@@ -9,6 +9,9 @@ import { useAppStore } from "~/state/store";
import { RiFlaskLine } from "react-icons/ri";
import { FiFilter } from "react-icons/fi";
import LogFilters from "~/components/requestLogs/LogFilters/LogFilters";
import ColumnVisiblityDropdown from "~/components/requestLogs/ColumnVisiblityDropdown";
import FineTuneButton from "~/components/requestLogs/FineTuneButton";
import ExportButton from "~/components/requestLogs/ExportButton";

export default function LoggedCalls() {
  const selectedLogIds = useAppStore((s) => s.selectedLogs.selectedLogIds);
@@ -16,33 +19,38 @@ export default function LoggedCalls() {
  const [filtersShown, setFiltersShown] = useState(true);

  return (
    <AppShell title="Request Logs" requireAuth>
      <VStack px={8} py={8} alignItems="flex-start" spacing={4} w="full">
        <Text fontSize="2xl" fontWeight="bold">
          Request Logs
        </Text>
        <Divider />
        <HStack w="full" justifyContent="flex-end">
          <ActionButton
            onClick={() => {
              setFiltersShown(!filtersShown);
            }}
            label={filtersShown ? "Hide Filters" : "Show Filters"}
            icon={FiFilter}
          />
          <ActionButton
            onClick={() => {
              console.log("experimenting with these ids", selectedLogIds);
            }}
            label="Experiment"
            icon={RiFlaskLine}
            isDisabled={selectedLogIds.size === 0}
          />
        </HStack>
        {filtersShown && <LogFilters />}
        <LoggedCallTable />
        <LoggedCallsPaginator />
      </VStack>
    <AppShell title="Request Logs" requireAuth requireBeta>
      <Box h="100vh" overflowY="scroll">
        <VStack px={8} py={8} alignItems="flex-start" spacing={4} w="full">
          <Text fontSize="2xl" fontWeight="bold">
            Request Logs
          </Text>
          <Divider />
          <HStack w="full" justifyContent="flex-end">
            <FineTuneButton />
            <ActionButton
              onClick={() => {
                console.log("experimenting with these ids", selectedLogIds);
              }}
              label="Experiment"
              icon={RiFlaskLine}
              isDisabled={selectedLogIds.size === 0}
            />
            <ExportButton />
            <ColumnVisiblityDropdown />
            <ActionButton
              onClick={() => {
                setFiltersShown(!filtersShown);
              }}
              label={filtersShown ? "Hide Filters" : "Show Filters"}
              icon={FiFilter}
            />
          </HStack>
          {filtersShown && <LogFilters />}
          <LoggedCallTable />
          <LoggedCallsPaginator />
        </VStack>
      </Box>
    </AppShell>
  );
}
@@ -1,113 +0,0 @@
import { type ChatCompletion } from "openai/resources/chat";
import { openai } from "../../utils/openai";
import { isAxiosError } from "./utils";
import { type APIResponse } from "openai/core";
import { sleep } from "~/server/utils/sleep";

const MAX_AUTO_RETRIES = 50;
const MIN_DELAY = 500; // milliseconds
const MAX_DELAY = 15000; // milliseconds

function calculateDelay(numPreviousTries: number): number {
  const baseDelay = Math.min(MAX_DELAY, MIN_DELAY * Math.pow(2, numPreviousTries));
  const jitter = Math.random() * baseDelay;
  return baseDelay + jitter;
}

const getCompletionWithBackoff = async (
  getCompletion: () => Promise<APIResponse<ChatCompletion>>,
) => {
  let completion;
  let tries = 0;
  while (tries < MAX_AUTO_RETRIES) {
    try {
      completion = await getCompletion();
      break;
    } catch (e) {
      if (isAxiosError(e)) {
        console.error(e?.response?.data?.error?.message);
      } else {
        await sleep(calculateDelay(tries));
        console.error(e);
      }
    }
    tries++;
  }
  return completion;
};
// TODO: Add seeds to ensure batches don't contain duplicate data
const MAX_BATCH_SIZE = 5;

export const autogenerateDatasetEntries = async (
  numToGenerate: number,
  inputDescription: string,
  outputDescription: string,
): Promise<{ input: string; output: string }[]> => {
  const batchSizes = Array.from({ length: Math.ceil(numToGenerate / MAX_BATCH_SIZE) }, (_, i) =>
    i === Math.ceil(numToGenerate / MAX_BATCH_SIZE) - 1 && numToGenerate % MAX_BATCH_SIZE
      ? numToGenerate % MAX_BATCH_SIZE
      : MAX_BATCH_SIZE,
  );

  const getCompletion = (batchSize: number) =>
    openai.chat.completions.create({
      model: "gpt-4",
      messages: [
        {
          role: "system",
          content: `The user needs ${batchSize} rows of data, each with an input and an output.\n---\n The input should follow these requirements: ${inputDescription}\n---\n The output should follow these requirements: ${outputDescription}`,
        },
      ],
      functions: [
        {
          name: "add_list_of_data",
          description: "Add a list of data to the database",
          parameters: {
            type: "object",
            properties: {
              rows: {
                type: "array",
                description: "The rows of data that match the description",
                items: {
                  type: "object",
                  properties: {
                    input: {
                      type: "string",
                      description: "The input for this row",
                    },
                    output: {
                      type: "string",
                      description: "The output for this row",
                    },
                  },
                },
              },
            },
          },
        },
      ],

      function_call: { name: "add_list_of_data" },
      temperature: 0.5,
      openpipe: {
        tags: {
          prompt_id: "autogenerateDatasetEntries",
        },
      },
    });

  const completionCallbacks = batchSizes.map((batchSize) =>
    getCompletionWithBackoff(() => getCompletion(batchSize)),
  );

  const completions = await Promise.all(completionCallbacks);

  const rows = completions.flatMap((completion) => {
    const parsed = JSON.parse(
      completion?.choices[0]?.message?.function_call?.arguments ?? "{rows: []}",
    ) as { rows: { input: string; output: string }[] };
    return parsed.rows;
  });

  return rows;
};
@@ -6,11 +6,10 @@ import { scenarioVariantCellsRouter } from "./routers/scenarioVariantCells.route
import { scenarioVarsRouter } from "./routers/scenarioVariables.router";
import { evaluationsRouter } from "./routers/evaluations.router";
import { worldChampsRouter } from "./routers/worldChamps.router";
import { datasetsRouter } from "./routers/datasets.router";
import { datasetEntries } from "./routers/datasetEntries.router";
import { projectsRouter } from "./routers/projects.router";
import { dashboardRouter } from "./routers/dashboard.router";
import { loggedCallsRouter } from "./routers/loggedCalls.router";
import { fineTunesRouter } from "./routers/fineTunes.router";
import { usersRouter } from "./routers/users.router";
import { adminJobsRouter } from "./routers/adminJobs.router";

@@ -27,11 +26,10 @@ export const appRouter = createTRPCRouter({
  scenarioVars: scenarioVarsRouter,
  evaluations: evaluationsRouter,
  worldChamps: worldChampsRouter,
  datasets: datasetsRouter,
  datasetEntries: datasetEntries,
  projects: projectsRouter,
  dashboard: dashboardRouter,
  loggedCalls: loggedCallsRouter,
  fineTunes: fineTunesRouter,
  users: usersRouter,
  adminJobs: adminJobsRouter,
});
@@ -1,145 +0,0 @@
import { z } from "zod";
import { createTRPCRouter, protectedProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { requireCanModifyDataset, requireCanViewDataset } from "~/utils/accessControl";
import { autogenerateDatasetEntries } from "../autogenerate/autogenerateDatasetEntries";

export const datasetEntries = createTRPCRouter({
  list: protectedProcedure
    .input(z.object({ datasetId: z.string(), page: z.number(), pageSize: z.number() }))
    .query(async ({ input, ctx }) => {
      await requireCanViewDataset(input.datasetId, ctx);

      const { datasetId, page, pageSize } = input;

      const entries = await prisma.datasetEntry.findMany({
        where: {
          datasetId,
        },
        orderBy: { createdAt: "desc" },
        skip: (page - 1) * pageSize,
        take: pageSize,
      });

      const count = await prisma.datasetEntry.count({
        where: {
          datasetId,
        },
      });

      return {
        entries,
        count,
      };
    }),
  createOne: protectedProcedure
    .input(
      z.object({
        datasetId: z.string(),
        input: z.string(),
        output: z.string().optional(),
      }),
    )
    .mutation(async ({ input, ctx }) => {
      await requireCanModifyDataset(input.datasetId, ctx);

      return await prisma.datasetEntry.create({
        data: {
          datasetId: input.datasetId,
          input: input.input,
          output: input.output,
        },
      });
    }),

  autogenerateEntries: protectedProcedure
    .input(
      z.object({
        datasetId: z.string(),
        numToGenerate: z.number(),
        inputDescription: z.string(),
        outputDescription: z.string(),
      }),
    )
    .mutation(async ({ input, ctx }) => {
      await requireCanModifyDataset(input.datasetId, ctx);

      const dataset = await prisma.dataset.findUnique({
        where: {
          id: input.datasetId,
        },
      });

      if (!dataset) {
        throw new Error(`Dataset with id ${input.datasetId} does not exist`);
      }

      const entries = await autogenerateDatasetEntries(
        input.numToGenerate,
        input.inputDescription,
        input.outputDescription,
      );

      const createdEntries = await prisma.datasetEntry.createMany({
        data: entries.map((entry) => ({
          datasetId: input.datasetId,
          input: entry.input,
          output: entry.output,
        })),
      });

      return createdEntries;
    }),

  delete: protectedProcedure
    .input(z.object({ id: z.string() }))
    .mutation(async ({ input, ctx }) => {
      const datasetId = (
        await prisma.datasetEntry.findUniqueOrThrow({
          where: { id: input.id },
        })
      ).datasetId;

      await requireCanModifyDataset(datasetId, ctx);

      return await prisma.datasetEntry.delete({
        where: {
          id: input.id,
        },
      });
    }),

  update: protectedProcedure
    .input(
      z.object({
        id: z.string(),
        updates: z.object({
          input: z.string(),
          output: z.string().optional(),
        }),
      }),
    )
    .mutation(async ({ input, ctx }) => {
      const existing = await prisma.datasetEntry.findUnique({
        where: {
          id: input.id,
        },
      });

      if (!existing) {
        throw new Error(`dataEntry with id ${input.id} does not exist`);
      }

      await requireCanModifyDataset(existing.datasetId, ctx);

      return await prisma.datasetEntry.update({
        where: {
          id: input.id,
        },
        data: {
          input: input.updates.input,
          output: input.updates.output,
        },
      });
    }),
});
@@ -1,88 +0,0 @@
import { z } from "zod";
import { createTRPCRouter, protectedProcedure, publicProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import {
  requireCanModifyDataset,
  requireCanModifyProject,
  requireCanViewDataset,
  requireCanViewProject,
} from "~/utils/accessControl";

export const datasetsRouter = createTRPCRouter({
  list: protectedProcedure
    .input(z.object({ projectId: z.string() }))
    .query(async ({ input, ctx }) => {
      await requireCanViewProject(input.projectId, ctx);

      const datasets = await prisma.dataset.findMany({
        where: {
          projectId: input.projectId,
        },
        orderBy: {
          createdAt: "desc",
        },
        include: {
          _count: {
            select: { datasetEntries: true },
          },
        },
      });

      return datasets;
    }),

  get: publicProcedure.input(z.object({ id: z.string() })).query(async ({ input, ctx }) => {
    await requireCanViewDataset(input.id, ctx);
    return await prisma.dataset.findFirstOrThrow({
      where: { id: input.id },
      include: {
        project: true,
      },
    });
  }),

  create: protectedProcedure
    .input(z.object({ projectId: z.string() }))
    .mutation(async ({ input, ctx }) => {
      await requireCanModifyProject(input.projectId, ctx);

      const numDatasets = await prisma.dataset.count({
        where: {
          projectId: input.projectId,
        },
      });

      return await prisma.dataset.create({
        data: {
          name: `Dataset ${numDatasets + 1}`,
          projectId: input.projectId,
        },
      });
    }),

  update: protectedProcedure
    .input(z.object({ id: z.string(), updates: z.object({ name: z.string() }) }))
    .mutation(async ({ input, ctx }) => {
      await requireCanModifyDataset(input.id, ctx);
      return await prisma.dataset.update({
        where: {
          id: input.id,
        },
        data: {
          name: input.updates.name,
        },
      });
    }),

  delete: protectedProcedure
    .input(z.object({ id: z.string() }))
    .mutation(async ({ input, ctx }) => {
      await requireCanModifyDataset(input.id, ctx);

      await prisma.dataset.delete({
        where: {
          id: input.id,
        },
      });
    }),
});
113
app/src/server/api/routers/fineTunes.router.ts
Normal file
@@ -0,0 +1,113 @@
import { z } from "zod";
import { v4 as uuidv4 } from "uuid";
import { type Prisma } from "@prisma/client";

import { createTRPCRouter, protectedProcedure } from "~/server/api/trpc";
import { prisma } from "~/server/db";
import { requireCanViewProject, requireCanModifyProject } from "~/utils/accessControl";
import { error, success } from "~/utils/errorHandling/standardResponses";

export const fineTunesRouter = createTRPCRouter({
  list: protectedProcedure
    .input(
      z.object({
        projectId: z.string(),
        page: z.number(),
        pageSize: z.number(),
      }),
    )
    .query(async ({ input, ctx }) => {
      const { projectId, page, pageSize } = input;

      await requireCanViewProject(projectId, ctx);

      const fineTunes = await prisma.fineTune.findMany({
        where: {
          projectId,
        },
        include: {
          dataset: {
            include: {
              _count: {
                select: {
                  datasetEntries: true,
                },
              },
            },
          },
        },
        orderBy: { createdAt: "asc" },
        skip: (page - 1) * pageSize,
        take: pageSize,
      });

      const count = await prisma.fineTune.count({
        where: {
          projectId,
        },
      });

      return {
        fineTunes,
        count,
      };
    }),
  create: protectedProcedure
    .input(
      z.object({
        projectId: z.string(),
        selectedLogIds: z.array(z.string()),
        slug: z.string(),
        baseModel: z.string(),
      }),
    )
    .mutation(async ({ input, ctx }) => {
      await requireCanModifyProject(input.projectId, ctx);

      const existingFineTune = await prisma.fineTune.findFirst({
        where: {
          slug: input.slug,
        },
      });

      if (existingFineTune) {
        return error("A fine tune with that slug already exists");
      }

      const newDatasetId = uuidv4();

      const datasetEntriesToCreate: Prisma.DatasetEntryCreateManyDatasetInput[] =
        input.selectedLogIds.map((loggedCallId) => ({
          loggedCallId,
        }));

      await prisma.$transaction([
        prisma.dataset.create({
          data: {
            id: newDatasetId,
            name: input.slug,
            project: {
              connect: {
                id: input.projectId,
              },
            },
            datasetEntries: {
              createMany: {
                data: datasetEntriesToCreate,
              },
            },
          },
        }),
        prisma.fineTune.create({
          data: {
            projectId: input.projectId,
            slug: input.slug,
            baseModel: input.baseModel,
            datasetId: newDatasetId,
          },
        }),
      ]);

      return success();
    }),
});
@@ -1,11 +1,16 @@
import { z } from "zod";
import { type Expression, type SqlBool, sql, type RawBuilder } from "kysely";
import { jsonArrayFrom } from "kysely/helpers/postgres";
import archiver from "archiver";
import { WritableStreamBuffer } from "stream-buffers";
import { type JsonValue } from "type-fest";
import { shuffle } from "lodash-es";

import { createTRPCRouter, protectedProcedure } from "~/server/api/trpc";
import { kysely, prisma } from "~/server/db";
import { comparators, defaultFilterableFields } from "~/state/logFiltersSlice";
import { requireCanViewProject } from "~/utils/accessControl";
import hashObject from "~/server/utils/hashObject";

// create comparator type based off of comparators
const comparatorToSqlExpression = (comparator: (typeof comparators)[number], value: string) => {
@@ -180,4 +185,102 @@ export const loggedCallsRouter = createTRPCRouter({

    return tags.map((tag) => tag.name);
  }),
  export: protectedProcedure
    .input(
      z.object({
        projectId: z.string(),
        selectedLogIds: z.string().array(),
        testingSplit: z.number(),
        selectedExportFormat: z.string(),
        removeDuplicates: z.boolean(),
      }),
    )
    .mutation(async ({ input, ctx }) => {
      await requireCanViewProject(input.projectId, ctx);

      // Fetch the real data using Prisma
      const loggedCallsFromDb = await ctx.prisma.loggedCallModelResponse.findMany({
        where: {
          originalLoggedCall: {
            projectId: input.projectId,
            id: { in: input.selectedLogIds },
          },
          statusCode: 200,
        },
      });

      // Convert the database data into the desired format
      let formattedLoggedCalls: { instruction: JsonValue[]; output: JsonValue }[] =
        loggedCallsFromDb.map((call) => ({
          instruction: (call.reqPayload as unknown as Record<string, unknown>)
            .messages as JsonValue[],
          output: (call.respPayload as unknown as { choices: { message: unknown }[] }).choices[0]
            ?.message as JsonValue,
        }));

      if (input.removeDuplicates) {
        const deduplicatedLoggedCalls = [];
        const loggedCallHashSet = new Set<string>();
        for (const loggedCall of formattedLoggedCalls) {
          const loggedCallHash = hashObject(loggedCall);
          if (!loggedCallHashSet.has(loggedCallHash)) {
            loggedCallHashSet.add(loggedCallHash);
            deduplicatedLoggedCalls.push(loggedCall);
          }
        }
        formattedLoggedCalls = deduplicatedLoggedCalls;
      }

      // Remove duplicate messages from instructions
      const instructionMessageHashMap = new Map<string, number>();
      for (const loggedCall of formattedLoggedCalls) {
        for (const message of loggedCall.instruction) {
          const hash = hashObject(message);
          if (instructionMessageHashMap.has(hash)) {
            // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
            instructionMessageHashMap.set(hash, instructionMessageHashMap.get(hash)! + 1);
          } else {
            instructionMessageHashMap.set(hash, 0);
          }
        }
      }
      for (const loggedCall of formattedLoggedCalls) {
        loggedCall.instruction = loggedCall.instruction.filter((message) => {
          const hash = hashObject(message);
          // If the same message appears in a single instruction multiple times, there is some danger of
          // it being removed from all logged calls. This is enough of an edge case that we don't
          // need to worry about it for now.
          // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
          return instructionMessageHashMap.get(hash)! < formattedLoggedCalls.length;
        });
      }

      // Stringify instructions and outputs
      const stringifiedLoggedCalls = shuffle(formattedLoggedCalls).map((loggedCall) => ({
        instruction: JSON.stringify(loggedCall.instruction),
        output: JSON.stringify(loggedCall.output),
      }));

      const splitIndex = Math.floor((stringifiedLoggedCalls.length * input.testingSplit) / 100);

      const testingData = stringifiedLoggedCalls.slice(0, splitIndex);
      const trainingData = stringifiedLoggedCalls.slice(splitIndex);

      // Convert arrays to JSONL format
      const trainingDataJSONL = trainingData.map((item) => JSON.stringify(item)).join("\n");
      const testingDataJSONL = testingData.map((item) => JSON.stringify(item)).join("\n");

      const output = new WritableStreamBuffer();
      const archive = archiver("zip");

      archive.pipe(output);
      archive.append(trainingDataJSONL, { name: "train.jsonl" });
      archive.append(testingDataJSONL, { name: "test.jsonl" });
      await archive.finalize();

      // Convert buffer to base64
      const base64 = output.getContents().toString("base64");

      return base64;
    }),
});
@@ -41,7 +41,7 @@ const requestUpdatedPromptFunction = async (
) => {
  const originalModelProvider = modelProviders[originalVariant.modelProvider as SupportedProvider];
  const originalModel = originalModelProvider.models[originalVariant.model] as Model;
  let newContructionFn = "";
  let newConstructionFn = "";
  for (let i = 0; i < NUM_RETRIES; i++) {
    try {
      const messages: CreateChatCompletionRequestMessage[] = [
@@ -137,7 +137,7 @@ const requestUpdatedPromptFunction = async (
      const args = await contructPromptFunctionArgs.copy(); // Get the actual value from the isolate

      if (args && isObject(args) && "new_prompt_function" in args) {
        newContructionFn = await formatPromptConstructor(args.new_prompt_function as string);
        newConstructionFn = await formatPromptConstructor(args.new_prompt_function as string);
        break;
      }
    } catch (e) {
@@ -145,5 +145,5 @@ const requestUpdatedPromptFunction = async (
    }
  }

  return newContructionFn;
  return newConstructionFn;
};
@@ -17,13 +17,7 @@ try {
  // Set a dummy key so it doesn't fail at build time
  config = {
    apiKey: env.OPENAI_API_KEY ?? "dummy-key",
    openpipe: {
      apiKey: env.OPENPIPE_API_KEY,
      baseUrl: "http://localhost:3000/api/v1",
    },
  };
}

// export const openai = env.OPENPIPE_API_KEY ? new OpenAI.OpenAI(config) : new OriginalOpenAI(config);

export const openai = new OpenAI(config);
37
app/src/state/columnVisiblitySlice.ts
Normal file
@@ -0,0 +1,37 @@
import { type SliceCreator } from "./store";

export const comparators = ["=", "!=", "CONTAINS", "NOT_CONTAINS"] as const;

export const defaultFilterableFields = ["Request", "Response", "Model", "Status Code"] as const;

export enum StaticColumnKeys {
  SENT_AT = "sentAt",
  MODEL = "model",
  DURATION = "duration",
  INPUT_TOKENS = "inputTokens",
  OUTPUT_TOKENS = "outputTokens",
  STATUS_CODE = "statusCode",
}

export type ColumnVisibilitySlice = {
  visibleColumns: Set<string>;
  toggleColumnVisibility: (columnKey: string) => void;
  showAllColumns: (columnKeys: string[]) => void;
};

export const createColumnVisibilitySlice: SliceCreator<ColumnVisibilitySlice> = (set, get) => ({
  // initialize with all static columns visible
  visibleColumns: new Set(Object.values(StaticColumnKeys)),
  toggleColumnVisibility: (columnKey: string) =>
    set((state) => {
      if (state.columnVisibility.visibleColumns.has(columnKey)) {
        state.columnVisibility.visibleColumns.delete(columnKey);
      } else {
        state.columnVisibility.visibleColumns.add(columnKey);
      }
    }),
  showAllColumns: (columnKeys: string[]) =>
    set((state) => {
      state.columnVisibility.visibleColumns = new Set(columnKeys);
    }),
});
23
app/src/state/featureFlags.ts
Normal file
@@ -0,0 +1,23 @@
import { type SliceCreator } from "./store";

export type FeatureFlagsSlice = {
  flagsLoaded: boolean;
  featureFlags: {
    betaAccess: boolean;
  };
  setFeatureFlags: (flags: string[] | undefined) => void;
};

export const createFeatureFlagsSlice: SliceCreator<FeatureFlagsSlice> = (set) => ({
  flagsLoaded: false,
  featureFlags: {
    betaAccess: false,
  },
  setFeatureFlags: (flags) =>
    set((state) => {
      state.featureFlags.featureFlags = {
        betaAccess: flags?.includes("betaAccess") ?? false,
      };
      state.featureFlags.flagsLoaded = true;
    }),
});
@@ -1,13 +1,27 @@
import { type PersistOptions } from "zustand/middleware/persist";
import { type State } from "./store";
import SuperJSON from "superjson";
import { merge, pick } from "lodash-es";
import { type PartialDeep } from "type-fest";

export const stateToPersist = {
  selectedProjectId: null as string | null,
};
export type PersistedState = PartialDeep<State>;

export const persistOptions: PersistOptions<State, typeof stateToPersist> = {
export const persistOptions: PersistOptions<State, PersistedState> = {
  name: "persisted-app-store",
  partialize: (state) => ({
    selectedProjectId: state.selectedProjectId,
    columnVisibility: pick(state.columnVisibility, ["visibleColumns"]),
  }),
  merge: (saved, state) => merge(state, saved),
  storage: {
    getItem: (key) => {
      const data = localStorage.getItem(key);
      return data ? SuperJSON.parse(data) : null;
    },
    setItem: (key, value) => localStorage.setItem(key, SuperJSON.stringify(value)),
    removeItem: (key) => localStorage.removeItem(key),
  },
  onRehydrateStorage: (state) => {
    if (state) state.isRehydrated = true;
  },
};
@@ -8,13 +8,16 @@ import {
  createVariantEditorSlice,
} from "./sharedVariantEditor.slice";
import { type APIClient } from "~/utils/api";
import { persistOptions, type stateToPersist } from "./persist";
import { type PersistedState, persistOptions } from "./persist";
import { type SelectedLogsSlice, createSelectedLogsSlice } from "./selectedLogsSlice";
import { type LogFiltersSlice, createLogFiltersSlice } from "./logFiltersSlice";
import { type ColumnVisibilitySlice, createColumnVisibilitySlice } from "./columnVisiblitySlice";
import { type FeatureFlagsSlice, createFeatureFlagsSlice } from "./featureFlags";

enableMapSet();

export type State = {
  isRehydrated: boolean;
  drawerOpen: boolean;
  openDrawer: () => void;
  closeDrawer: () => void;
@@ -25,6 +28,8 @@ export type State = {
  setSelectedProjectId: (id: string) => void;
  selectedLogs: SelectedLogsSlice;
  logFilters: LogFiltersSlice;
  columnVisibility: ColumnVisibilitySlice;
  featureFlags: FeatureFlagsSlice;
};

export type SliceCreator<T> = StateCreator<State, [["zustand/immer", never]], [], T>;
@@ -32,18 +37,15 @@ export type SliceCreator<T> = StateCreator<State, [["zustand/immer", never]], []
export type SetFn = Parameters<SliceCreator<unknown>>[0];
export type GetFn = Parameters<SliceCreator<unknown>>[1];

const useBaseStore = create<
  State,
  [["zustand/persist", typeof stateToPersist], ["zustand/immer", never]]
>(
const useBaseStore = create<State, [["zustand/persist", PersistedState], ["zustand/immer", never]]>(
  persist(
    immer((set, get, ...rest) => ({
      isRehydrated: false,
      api: null,
      setApi: (api) =>
        set((state) => {
          state.api = api;
        }),

      drawerOpen: false,
      openDrawer: () =>
        set((state) => {
@@ -61,6 +63,8 @@ const useBaseStore = create<
        }),
      selectedLogs: createSelectedLogsSlice(set, get, ...rest),
      logFilters: createLogFiltersSlice(set, get, ...rest),
      columnVisibility: createColumnVisibilitySlice(set, get, ...rest),
      featureFlags: createFeatureFlagsSlice(set, get, ...rest),
    })),
    persistOptions,
  ),
@@ -78,33 +78,6 @@ export const requireCanModifyProject = async (projectId: string, ctx: TRPCContex
  }
};

export const requireCanViewDataset = async (datasetId: string, ctx: TRPCContext) => {
  ctx.markAccessControlRun();

  const dataset = await prisma.dataset.findFirst({
    where: {
      id: datasetId,
      project: {
        projectUsers: {
          some: {
            role: { in: [ProjectUserRole.ADMIN, ProjectUserRole.MEMBER] },
            userId: ctx.session?.user.id,
          },
        },
      },
    },
  });

  if (!dataset) {
    throw new TRPCError({ code: "UNAUTHORIZED" });
  }
};

export const requireCanModifyDataset = async (datasetId: string, ctx: TRPCContext) => {
  // Right now all users who can view a dataset can also modify it
  await requireCanViewDataset(datasetId, ctx);
};

export const requireCanViewExperiment = (experimentId: string, ctx: TRPCContext): Promise<void> => {
  // Right now all experiments are publicly viewable, so this is a no-op.
  ctx.markAccessControlRun();
@@ -1,11 +1,12 @@
"use client";
import { useSession } from "next-auth/react";
import React, { type ReactNode, useEffect } from "react";
import { PostHogProvider } from "posthog-js/react";
import { PostHogProvider, useActiveFeatureFlags } from "posthog-js/react";

import posthog from "posthog-js";
import { env } from "~/env.mjs";
import { useRouter } from "next/router";
import { useAppStore } from "~/state/store";

// Make sure we're in the browser
const inBrowser = typeof window !== "undefined";
@@ -24,6 +25,14 @@ export const PosthogAppProvider = ({ children }: { children: ReactNode }) => {
    };
  }, [router.events]);

  const setFeatureFlags = useAppStore((s) => s.featureFlags.setFeatureFlags);
  const activeFlags = useActiveFeatureFlags();
  useEffect(() => {
    if (activeFlags) {
      setFeatureFlags(activeFlags);
    }
  }, [activeFlags, setFeatureFlags]);

  useEffect(() => {
    if (env.NEXT_PUBLIC_POSTHOG_KEY && inBrowser && session && session.user) {
      posthog.init(env.NEXT_PUBLIC_POSTHOG_KEY, {
@@ -26,34 +26,6 @@ export const useExperimentAccess = () => {
  return useExperiment().data?.access ?? { canView: false, canModify: false };
};

export const useDatasets = () => {
  const selectedProjectId = useAppStore((state) => state.selectedProjectId);
  return api.datasets.list.useQuery(
    { projectId: selectedProjectId ?? "" },
    { enabled: !!selectedProjectId },
  );
};

export const useDataset = () => {
  const router = useRouter();
  const dataset = api.datasets.get.useQuery(
    { id: router.query.id as string },
    { enabled: !!router.query.id },
  );

  return dataset;
};

export const useDatasetEntries = () => {
  const dataset = useDataset();
  const { page, pageSize } = usePageParams();

  return api.datasetEntries.list.useQuery(
    { datasetId: dataset.data?.id ?? "", page, pageSize },
    { enabled: dataset.data?.id != null },
  );
};

type AsyncFunction<T extends unknown[], U> = (...args: T) => Promise<U>;

export function useHandledAsyncCallback<T extends unknown[], U>(
@@ -205,3 +177,22 @@ export const useTagNames = () => {
    { enabled: !!selectedProjectId },
  );
};

export const useFineTunes = () => {
  const selectedProjectId = useAppStore((state) => state.selectedProjectId);
  const { page, pageSize } = usePageParams();

  return api.fineTunes.list.useQuery(
    { projectId: selectedProjectId ?? "", page, pageSize },
    { enabled: !!selectedProjectId },
  );
};

export const useIsClientRehydrated = () => {
  const isRehydrated = useAppStore((state) => state.isRehydrated);
  const [isMounted, setIsMounted] = useState(false);
  useEffect(() => {
    setIsMounted(true);
  }, []);
  return isRehydrated && isMounted;
};
40
client-libs/python/README.md
Normal file
@@ -0,0 +1,40 @@
# OpenPipe Python Client

This client allows you to automatically report your OpenAI calls to [OpenPipe](https://openpipe.ai/).

## Installation

`pip install openpipe`

## Usage

1. Create a project at https://app.openpipe.ai
2. Find your project's API key at https://app.openpipe.ai/project/settings
3. Configure the OpenPipe client as shown below.

```python
from openpipe import openai, configure_openpipe
import os

# Set the OpenPipe API key you got in step (2) above.
# If you have the `OPENPIPE_API_KEY` environment variable set we'll read from it by default.
configure_openpipe(api_key=os.getenv("OPENPIPE_API_KEY"))

# Configure OpenAI the same way you would normally
openai.api_key = os.getenv("OPENAI_API_KEY")
```

You can use the OpenPipe client for normal OpenAI calls as well.
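For example, a standard chat completion request needs no changes. A minimal sketch, assuming the wrapped client mirrors the stock `openai` module as the configuration above suggests:

```python
# A normal call through the OpenPipe-wrapped client; the request and response
# are reported to OpenPipe in the background.
completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(completion.choices[0].message.content)
```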
## Special Features

### Tagging

OpenPipe has a concept of "tagging." This is very useful for grouping a certain set of completions together. When you're using a dataset for fine-tuning, you can select all the prompts that match a certain set of tags. Here's how you can use the tagging feature:

```python
completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "system", "content": "count to 10"}],
    openpipe={"tags": {"prompt_id": "counting"}},
)
```
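Since `tags` is a dictionary, it should be possible to attach several tags to one call and filter on any of them later. A sketch under that assumption (the `env` tag name here is hypothetical, not from the original README):

```python
completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "system", "content": "count to 10"}],
    # "prompt_id" matches the example above; "env" is an illustrative extra tag.
    openpipe={"tags": {"prompt_id": "counting", "env": "testing"}},
)
```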
202
client-libs/python/openpipe/LICENSE
Normal file
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -4,7 +4,7 @@ import time
import inspect

from openpipe.merge_openai_chunks import merge_openai_chunks
from openpipe.openpipe_meta import OpenPipeMeta
from openpipe.openpipe_meta import openpipe_meta

from .shared import (
    _should_check_cache,
@@ -41,9 +41,11 @@ class WrappedChatCompletion(original_openai.ChatCompletion):
)

cache_status = (
    "MISS" if _should_check_cache(openpipe_options) else "SKIP"
    "MISS"
    if _should_check_cache(openpipe_options, kwargs)
    else "SKIP"
)
chunk.openpipe = OpenPipeMeta(cache_status=cache_status)
chunk.openpipe = openpipe_meta(cache_status=cache_status)

yield chunk

@@ -72,9 +74,9 @@ class WrappedChatCompletion(original_openai.ChatCompletion):
)

cache_status = (
    "MISS" if _should_check_cache(openpipe_options) else "SKIP"
    "MISS" if _should_check_cache(openpipe_options, kwargs) else "SKIP"
)
chat_completion["openpipe"] = OpenPipeMeta(cache_status=cache_status)
chat_completion["openpipe"] = openpipe_meta(cache_status=cache_status)
return chat_completion
except Exception as e:
received_at = int(time.time() * 1000)
@@ -126,9 +128,11 @@ class WrappedChatCompletion(original_openai.ChatCompletion):
assembled_completion, chunk
)
cache_status = (
    "MISS" if _should_check_cache(openpipe_options) else "SKIP"
    "MISS"
    if _should_check_cache(openpipe_options, kwargs)
    else "SKIP"
)
chunk.openpipe = OpenPipeMeta(cache_status=cache_status)
chunk.openpipe = openpipe_meta(cache_status=cache_status)

yield chunk

@@ -157,9 +161,9 @@ class WrappedChatCompletion(original_openai.ChatCompletion):
)

cache_status = (
    "MISS" if _should_check_cache(openpipe_options) else "SKIP"
    "MISS" if _should_check_cache(openpipe_options, kwargs) else "SKIP"
)
chat_completion["openpipe"] = OpenPipeMeta(cache_status=cache_status)
chat_completion["openpipe"] = openpipe_meta(cache_status=cache_status)

return chat_completion
except Exception as e:
@@ -1,7 +1,2 @@
from attr import dataclass


@dataclass
class OpenPipeMeta:
    # Cache status. One of 'HIT', 'MISS', 'SKIP'
    cache_status: str
def openpipe_meta(cache_status: str):
    return {"cache_status": cache_status}
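This hunk and the wrapper changes above it replace the attrs `OpenPipeMeta` dataclass with a plain dict, so callers move from attribute access to key lookup. A minimal before/after sketch, built only from what this diff shows (not additional library code):

```python
from attr import dataclass


@dataclass
class OpenPipeMeta:  # old shape: metadata as an attrs dataclass
    cache_status: str


def openpipe_meta(cache_status: str):  # new shape: metadata as a plain dict
    return {"cache_status": cache_status}


old = OpenPipeMeta(cache_status="MISS")
new = openpipe_meta(cache_status="MISS")
assert old.cache_status == new["cache_status"]  # .cache_status becomes ["cache_status"]
```

The test updates later in this comparison make the same move, e.g. `completion.openpipe.cache_status` becomes `completion.openpipe["cache_status"]`.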
@@ -8,6 +8,7 @@ from openpipe.api_client.models.report_json_body_tags import (
)
import toml
import time
import os

version = toml.load("pyproject.toml")["tool"]["poetry"]["version"]

@@ -15,6 +16,9 @@ configured_client = AuthenticatedClient(
    base_url="https://app.openpipe.ai/api/v1", token=""
)

if os.environ.get("OPENPIPE_API_KEY"):
    configured_client.token = os.environ["OPENPIPE_API_KEY"]


def _get_tags(openpipe_options):
    tags = openpipe_options.get("tags") or {}
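The hunk above makes the shared client pick up `OPENPIPE_API_KEY` from the environment at import time. A self-contained sketch of the same pattern; the `AuthenticatedClient` below is a stand-in for the generated API client class, not its real definition:

```python
import os


class AuthenticatedClient:  # stand-in for the openpipe.api_client class used above
    def __init__(self, base_url: str, token: str):
        self.base_url = base_url
        self.token = token


# Start with an empty token, then override it when the env var is set.
configured_client = AuthenticatedClient(
    base_url="https://app.openpipe.ai/api/v1", token=""
)

if os.environ.get("OPENPIPE_API_KEY"):
    configured_client.token = os.environ["OPENPIPE_API_KEY"]
```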
@@ -27,12 +27,14 @@ def last_logged_call():
    return local_testing_only_get_latest_logged_call.sync(client=configured_client)


@pytest.mark.focus
def test_sync():
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": "count to 3"}],
    )

    print("completion is", completion)
    last_logged = last_logged_call()
    assert (
        last_logged.model_response.resp_payload["choices"][0]["message"]["content"]
@@ -42,7 +44,7 @@ def test_sync():
        last_logged.model_response.req_payload["messages"][0]["content"] == "count to 3"
    )

    assert completion.openpipe.cache_status == "SKIP"
    assert completion.openpipe["cache_status"] == "SKIP"


def test_streaming():
@@ -75,7 +77,7 @@ async def test_async():
        == "count down from 5"
    )

    assert completion.openpipe.cache_status == "SKIP"
    assert completion.openpipe["cache_status"] == "SKIP"


async def test_async_streaming():
@@ -87,7 +89,7 @@ async def test_async_streaming():

    merged = None
    async for chunk in completion:
        assert chunk.openpipe.cache_status == "SKIP"
        assert chunk.openpipe["cache_status"] == "SKIP"
        merged = merge_openai_chunks(merged, chunk)

    last_logged = last_logged_call()
@@ -100,7 +102,7 @@ async def test_async_streaming():
        last_logged.model_response.req_payload["messages"][0]["content"]
        == "count down from 5"
    )
    assert merged["openpipe"].cache_status == "SKIP"
    assert merged["openpipe"]["cache_status"] == "SKIP"


def test_sync_with_tags():
@@ -146,7 +148,7 @@ async def test_caching():
        messages=messages,
        openpipe={"cache": True},
    )
    assert completion.openpipe.cache_status == "MISS"
    assert completion.openpipe["cache_status"] == "MISS"

    first_logged = last_logged_call()
    assert (
@@ -159,4 +161,4 @@ async def test_caching():
        messages=messages,
        openpipe={"cache": True},
    )
    assert completion2.openpipe.cache_status == "HIT"
    assert completion2.openpipe["cache_status"] == "HIT"
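The `test_caching` changes above spell out the caching contract: the first call with `openpipe={"cache": True}` reports a `MISS` and stores the response, and an identical second call reports a `HIT`. In application code that looks roughly like this (a sketch; the prompt is a placeholder, not the exact messages the test uses):

```python
from openpipe import openai  # the wrapped client exercised by these tests

messages = [{"role": "system", "content": "count to 3"}]

first = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    openpipe={"cache": True},
)
assert first.openpipe["cache_status"] == "MISS"  # computed remotely, then cached

second = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    openpipe={"cache": True},
)
assert second.openpipe["cache_status"] == "HIT"  # identical request served from cache
```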
@@ -1,9 +1,12 @@
[tool.poetry]
name = "openpipe"
version = "0.1.0"
description = ""
authors = ["Kyle Corbitt <kyle@corbt.com>"]
version = "3.0.1"
description = "Python client library for the OpenPipe service"
authors = ["Kyle Corbitt <kyle@openpipe.ai>"]
license = "Apache-2.0"
readme = "README.md"
homepage = "https://github.com/OpenPipe/OpenPipe"
repository = "https://github.com/OpenPipe/OpenPipe"

[tool.poetry.dependencies]
python = "^3.9"
6
examples/.gitignore
vendored
Normal file
@@ -0,0 +1,6 @@
axolotl/
models/
data/
wandb/
cache/
.ipynb_checkpoints/
7
examples/classify-recipes/.env.example
Normal file
@@ -0,0 +1,7 @@
OPENAI_API_KEY="[your OpenAI API key]"
OPENPIPE_API_KEY="[your OpenPipe API key from https://app.openpipe.ai/project/settings]"

# You'll need this to download the Llama 2 weights from Hugging Face
HUGGING_FACE_HUB_TOKEN="[Your Hugging Face Hub token]"

WANDB_API_KEY="[Optionally, you can set a Weights & Biases API key to track your training run. Create it at https://wandb.ai/settings]"
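A quick sketch of consuming this `.env` file from Python with python-dotenv, the same pattern the notebooks below use (key names match the file above):

```python
import os

import dotenv

# Reads .env from the working directory and exports its entries into
# os.environ without overwriting variables that are already set.
dotenv.load_dotenv()

for key in ("OPENAI_API_KEY", "OPENPIPE_API_KEY", "HUGGING_FACE_HUB_TOKEN"):
    print(key, "is set" if os.environ.get(key) else "is missing")
```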
317
examples/classify-recipes/1-generate-data.ipynb
Normal file
@@ -0,0 +1,317 @@
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "In this notebook I'm using the OpenPipe client to capture a set of calls to the OpenAI API.\n",
        "\n",
        "For this example I'll blithely throw engineering best practices to the wind and use the notebook itself to manage dependencies. 😁\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {},
      "outputs": [],
      "source": [
        "%%capture\n",
        "%pip install openpipe==3.0.3 python-dotenv==1.0.0 datasets==2.14.4"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "When working with remote datasets (or any data, really), it's a good idea to visually inspect some samples to make sure they look like you expect. I'll print a recipe.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Recipe dataset shape:\n",
            "------------------\n"
          ]
        },
        {
          "data": {
            "text/plain": [
              "Dataset({\n",
              "    features: ['recipe'],\n",
              "    num_rows: 5000\n",
              "})"
            ]
          },
          "metadata": {},
          "output_type": "display_data"
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "First recipe:\n",
            "------------------ Shrimp Creole\n",
            "\n",
            "Ingredients:\n",
            "- 20 shrimp (8 oz.)\n",
            "- 2 c. (16 oz. can) tomato sauce\n",
            "- 1 small onion, chopped\n",
            "- 1 celery stalk, chopped\n",
            "- 1/4 green bell pepper, diced\n",
            "- 1/4 c. sliced mushrooms\n",
            "- 3 Tbsp. parsley\n",
            "- 1/2 tsp. pepper\n",
            "- 1 to 1-1/2 c. brown rice, prepared according to pkg. directions (not included in exchanges)\n",
            "\n",
            "Directions:\n",
            "- Peel, devein and wash shrimp; set aside.\n",
            "- (If shrimp are frozen, let thaw first in the refrigerator.)\n",
            "- Simmer tomato sauce, onion, celery, green pepper, mushrooms, parsley and pepper in skillet for 30 minutes.\n",
            "- Add shrimp and cook 10 to 15 minutes more, until shrimp are tender.\n",
            "- Serve over brown rice.\n",
            "- Serves 2.\n"
          ]
        }
      ],
      "source": [
        "from datasets import load_dataset\n",
        "\n",
        "recipes = load_dataset(\"corbt/unlabeled-recipes\")[\"train\"]\n",
        "print(\"Recipe dataset shape:\\n------------------\")\n",
        "display(recipes)\n",
        "print(\"First recipe:\\n------------------\", recipes[\"recipe\"][0])\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Mm, delicious. Anyway, we need to generate a training dataset. We'll call GPT-4 on each of our examples.\n",
        "\n",
        "In this case, I'll ask GPT-4 to classify each recipe along 5 dimensions:\n",
        "\n",
        "- has_non_fish_meat\n",
        "- requires_oven\n",
        "- requires_stove\n",
        "- cook_time_over_30_mins\n",
        "- main_dish\n",
        "\n",
        "That looks like a pretty random list, but there's actually an important unifying thread: I'm looking for meals that my pescatarian brother/co-founder can make in his kitchen-less, near-window-less basement apartment in San Francisco! (If you haven't tried to get an apartment in SF you probably think I'm joking 😂.)\n",
        "\n",
        "I'll use [OpenPipe](https://github.com/openpipe/openpipe) to track the API calls and form a training dataset. To follow along you'll need to create a free OpenPipe account, then copy your API key from https://app.openpipe.ai/project/settings into a file called `.env`. You can see an example in [./.env.example](./.env.example).\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Classifying first recipe:\n",
            "------------------\n",
            "{'has_non_fish_meat': False, 'requires_oven': False, 'requires_stove': True, 'cook_time_over_30_mins': True, 'main_dish': True}\n"
          ]
        }
      ],
      "source": [
        "from openpipe import openai, configure_openpipe\n",
        "import json\n",
        "import os\n",
        "import dotenv\n",
        "\n",
        "# Use `dotenv` to load the contents of the `.env` file into the environment\n",
        "dotenv.load_dotenv()\n",
        "\n",
        "# Configure OpenPipe using the API key from the environment\n",
        "configure_openpipe(api_key=os.environ[\"OPENPIPE_API_KEY\"])\n",
        "\n",
        "# Configure OpenAI using the API key from the environment\n",
        "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n",
        "\n",
        "\n",
        "def classify_recipe(recipe: str):\n",
        "    completion = openai.ChatCompletion.create(\n",
        "        model=\"gpt-4\",\n",
        "        messages=[\n",
        "            {\n",
        "                \"role\": \"system\",\n",
        "                \"content\": \"Your goal is to classify a recipe along several dimensions. Pay attention to the instructions.\",\n",
        "            },\n",
        "            {\n",
        "                \"role\": \"user\",\n",
        "                \"content\": recipe,\n",
        "            },\n",
        "        ],\n",
        "        functions=[\n",
        "            {\n",
        "                \"name\": \"classify\",\n",
        "                \"parameters\": {\n",
        "                    \"type\": \"object\",\n",
        "                    \"properties\": {\n",
        "                        \"has_non_fish_meat\": {\n",
        "                            \"type\": \"boolean\",\n",
        "                            \"description\": \"True if the recipe contains any meat or meat products (eg. chicken broth) besides fish\",\n",
        "                        },\n",
        "                        \"requires_oven\": {\n",
        "                            \"type\": \"boolean\",\n",
        "                            \"description\": \"True if the recipe requires an oven\",\n",
        "                        },\n",
        "                        \"requires_stove\": {\n",
        "                            \"type\": \"boolean\",\n",
        "                            \"description\": \"True if the recipe requires a stove\",\n",
        "                        },\n",
        "                        \"cook_time_over_30_mins\": {\n",
        "                            \"type\": \"boolean\",\n",
        "                            \"description\": \"True if the recipe takes over 30 minutes to prepare and cook, including waiting time\",\n",
        "                        },\n",
        "                        \"main_dish\": {\n",
        "                            \"type\": \"boolean\",\n",
        "                            \"description\": \"True if the recipe can be served as a main dish\",\n",
        "                        },\n",
        "                    },\n",
        "                    \"required\": [\n",
        "                        \"has_non_fish_meat\",\n",
        "                        \"requires_oven\",\n",
        "                        \"requires_stove\",\n",
        "                        \"cook_time_over_30_mins\",\n",
        "                        \"main_dish\",\n",
        "                    ],\n",
        "                },\n",
        "            }\n",
        "        ],\n",
        "        function_call={\n",
        "            \"name\": \"classify\",\n",
        "        },\n",
        "        openpipe={\"tags\": {\"prompt_id\": \"classify-recipe\"}, \"cache\": True},\n",
        "    )\n",
        "    return json.loads(completion.choices[0].message.function_call.arguments)\n",
        "\n",
        "\n",
        "print(\"Classifying first recipe:\\n------------------\")\n",
        "print(classify_recipe(recipes[\"recipe\"][0]))\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "That's working, so I'll go ahead and classify all 5000 recipes with GPT-4. Using GPT-4 for this is slowwww and costs about $40. The model I'm fine-tuning will be much faster -- we'll see if we can make it as good!\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Classifying recipe 0/5000: Shrimp Creole\n",
            "Classifying recipe 100/5000: Spoon Bread\n",
            "Classifying recipe 200/5000: Quadrangle Grille'S Pumpkin-Walnut Cheesecake\n",
            "Classifying recipe 300/5000: Broccoli Casserole\n",
            "Classifying recipe 400/5000: Paal Payasam (3-Ingredient Rice Pudding)\n",
            "Classifying recipe 500/5000: Dirt Dessert\n",
            "Classifying recipe 600/5000: Dolma, Stuffed Dried Peppers And Eggplants\n",
            "Classifying recipe 700/5000: Party Pecan Pies\n",
            "Classifying recipe 800/5000: Pie Crust\n",
            "Classifying recipe 900/5000: Russian Dressing(Salad Dressing) \n",
            "Classifying recipe 1000/5000: O'Brien Potatoes\n",
            "Classifying recipe 1100/5000: Monster Cookies\n",
            "Classifying recipe 1200/5000: Striped Fruit Pops\n",
            "Classifying recipe 1300/5000: Cute Heart-Shaped Fried Egg\n",
            "Classifying recipe 1400/5000: Steak Marinade\n",
            "Classifying recipe 1500/5000: Bbq Sauce For Fish Recipe\n",
            "Classifying recipe 1600/5000: Barbecue Ranch Salad\n",
            "Classifying recipe 1700/5000: White Fudge\n",
            "Classifying recipe 1800/5000: Seaton Chocolate Chip Cookies\n",
            "Classifying recipe 1900/5000: Beef Stroganoff\n",
            "Classifying recipe 2000/5000: Lemon Delight\n",
            "Classifying recipe 2100/5000: Cream Cheese Chicken Chili\n",
            "Classifying recipe 2200/5000: Bean Salad\n",
            "Classifying recipe 2300/5000: Green Beans Almondine\n",
            "Classifying recipe 2400/5000: Radish-And-Avocado Salad\n",
            "Classifying recipe 2500/5000: Salsa Rojo\n",
            "Classifying recipe 2600/5000: Pepperoni Bread\n",
            "Classifying recipe 2700/5000: Sabzi Polow\n",
            "Classifying recipe 2800/5000: Italian Vegetable Pizzas\n",
            "Classifying recipe 2900/5000: Hot Fudge Sauce, Soda Shop Style\n",
            "Classifying recipe 3000/5000: Meatball Soup With Vegetables And Brown Rice\n",
            "Classifying recipe 3100/5000: Herbed Potatoes And Onions\n",
            "Classifying recipe 3200/5000: Apple Crunch Pie (2 Extra Servings)\n",
            "Classifying recipe 3300/5000: Pineapple-Orange Punch\n",
            "Classifying recipe 3400/5000: Turkey Veggie Burgers With Avocado Mayo\n",
            "Classifying recipe 3500/5000: Pear & Goat Cheese Salad\n",
            "Classifying recipe 3600/5000: Triple Chocolate Cookies\n",
            "Classifying recipe 3700/5000: Strawberry Banana Yogurt Pops\n",
            "Classifying recipe 3800/5000: Chicken Croquettes\n",
            "Classifying recipe 3900/5000: Mushroom Casserole\n",
            "Classifying recipe 4000/5000: Vegetarian Summer Roll\n",
            "Classifying recipe 4100/5000: Prune Cake\n",
            "Classifying recipe 4200/5000: Strawberry Sorbet\n",
            "Classifying recipe 4300/5000: Lemonade Chicken\n",
            "Classifying recipe 4400/5000: Crock-Pot Vegetarian Chili\n",
            "Classifying recipe 4500/5000: Grandma Dickrell'S Molasses Cake - 1936\n",
            "Classifying recipe 4600/5000: Creamed Corn Casserole\n",
            "Classifying recipe 4700/5000: Homemade Croutons\n",
            "Classifying recipe 4800/5000: Potatoes With Leeks And Gruyere\n",
            "Classifying recipe 4900/5000: Chocolate Oatmeal Cookie\n"
          ]
        }
      ],
      "source": [
        "for i, recipe in enumerate(recipes[\"recipe\"]):\n",
        "    if i % 100 == 0:\n",
        "        recipe_title = recipe.split(\"\\n\")[0]\n",
        "        print(f\"Classifying recipe {i}/{len(recipes)}: {recipe_title}\")\n",
        "    try:\n",
        "        classify_recipe(recipe)\n",
        "    except Exception as e:\n",
        "        print(f\"Error classifying recipe {i}: {e}\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Ok, now that my recipes are classified I'll download the training data. Just go to https://app.openpipe.ai/request-logs, select all the logs you created, and click \"Export\". The default 10% testing split is fine for this dataset size.\n",
        "\n",
        "I got two files from that: `train.jsonl` and `test.jsonl`. I moved both of them into this repository under `./data/`.\n",
        "\n",
        "Next up I'll train the model -- check out [./2-train.ipynb](./2-train.ipynb) for details!\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.10.6"
    },
    "orig_nbformat": 4
  },
  "nbformat": 4,
  "nbformat_minor": 2
}
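Between this notebook and the next, the exported `train.jsonl` and `test.jsonl` land under `./data/`. A minimal sketch of peeking at one exported record before training; this assumes one JSON object per line, and the exact export schema may differ:

```python
import json

# Each line of the export is one logged call, serialized as JSON.
with open("data/train.jsonl") as f:
    example = json.loads(f.readline())

# Inspect the top-level keys before wiring the file into training.
print(sorted(example.keys()))
```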
947
examples/classify-recipes/2-train.ipynb
Normal file
947
examples/classify-recipes/2-train.ipynb
Normal file
@@ -0,0 +1,947 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's get to the fun part -- training a model. I'll start by installing the dependencies.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture\n",
|
||||
"%pip install peft==0.5.0 python-dotenv==2.0.0\n",
|
||||
"\n",
|
||||
"!git clone https://github.com/OpenAccess-AI-Collective/axolotl\n",
|
||||
"%pip install -e \"./axolotl[flash-attn]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note to the reader: since we'll be basing our fine-tuned model on Meta's Llama 2, you need to apply for access to the weights (which will be automatically granted). Follow the steps on [HuggingFace](https://huggingface.co/meta-llama/Llama-2-7b-hf), then create a read-only access token [here](https://huggingface.co/settings/tokens) and copy it into your .env file.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hugging Face token set: True\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import dotenv\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"dotenv.load_dotenv()\n",
|
||||
"\n",
|
||||
"has_token = os.getenv(\"HUGGING_FACE_HUB_TOKEN\") is not None\n",
|
||||
"\n",
|
||||
"print(f\"Hugging Face token set: {has_token}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"I'll use the [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) library to manage this training run. It includes a lot of neat tricks that speed up training without sacrificing quality.\n",
|
||||
"\n",
|
||||
"In this case I'm using 8-bit training to use less GPU RAM, and sample packing to maximize GPU utilization. You can read more about the available options at https://github.com/OpenAccess-AI-Collective/axolotl.\n",
|
||||
"\n",
|
||||
"The training run options are defined in [training-config.yaml](./training-config.yaml).\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The following values were not passed to `accelerate launch` and had defaults used instead:\n",
|
||||
"\t`--num_processes` was set to a value of `1`\n",
|
||||
"\t`--num_machines` was set to a value of `1`\n",
|
||||
"\t`--mixed_precision` was set to a value of `'no'`\n",
|
||||
"\t`--dynamo_backend` was set to a value of `'no'`\n",
|
||||
"To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.\n",
|
||||
"\n",
|
||||
" dP dP dP\n",
|
||||
" 88 88 88\n",
|
||||
".d8888b. dP. .dP .d8888b. 88 .d8888b. d8888P 88\n",
|
||||
"88' `88 `8bd8' 88' `88 88 88' `88 88 88\n",
|
||||
"88. .88 .d88b. 88. .88 88 88. .88 88 88\n",
|
||||
"`88888P8 dP' `dP `88888P' dP `88888P' dP dP\n",
|
||||
"\n",
|
||||
"[2023-08-24 20:18:54,867] [INFO] [axolotl.normalize_config:72] [PID:125016] GPU memory usage baseline: 0.000GB (+0.674GB misc)\u001b[39m\n",
|
||||
"[2023-08-24 20:18:54,867] [INFO] [axolotl.scripts.train:189] [PID:125016] loading tokenizer... meta-llama/Llama-2-7b-hf\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,078] [DEBUG] [axolotl.load_tokenizer:64] [PID:125016] EOS: 2 / </s>\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,078] [DEBUG] [axolotl.load_tokenizer:65] [PID:125016] BOS: 1 / <s>\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,078] [DEBUG] [axolotl.load_tokenizer:66] [PID:125016] PAD: 0 / [PAD]\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,078] [DEBUG] [axolotl.load_tokenizer:67] [PID:125016] UNK: 0 / <unk>\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,079] [INFO] [axolotl.load_tokenized_prepared_datasets:126] [PID:125016] Unable to find prepared dataset in data/last_run_prepared/82cd9d58e34e0db98296199248c92d0d\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,079] [INFO] [axolotl.load_tokenized_prepared_datasets:127] [PID:125016] Loading raw datasets...\u001b[39m\n",
|
||||
"[2023-08-24 20:18:55,079] [INFO] [axolotl.load_tokenized_prepared_datasets:132] [PID:125016] No seed provided, using default seed of 42\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/datasets/load.py:2072: FutureWarning: 'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n",
|
||||
"You can remove this warning by passing 'token=None' instead.\n",
|
||||
" warnings.warn(\n",
|
||||
"Downloading data files: 100%|███████████████████| 1/1 [00:00<00:00, 1909.97it/s]\n",
|
||||
"Extracting data files: 100%|█████████████████████| 1/1 [00:00<00:00, 130.16it/s]\n",
|
||||
"Generating train split: 4501 examples [00:00, 72594.78 examples/s]\n",
|
||||
"Map (num_proc=64): 100%|███████████| 4501/4501 [00:01<00:00, 3465.17 examples/s]\n",
|
||||
"[2023-08-24 20:18:58,085] [INFO] [axolotl.load_tokenized_prepared_datasets:330] [PID:125016] merging datasets\u001b[39m\n",
|
||||
"[2023-08-24 20:18:58,092] [INFO] [axolotl.load_tokenized_prepared_datasets:337] [PID:125016] Saving merged prepared dataset to disk... data/last_run_prepared/82cd9d58e34e0db98296199248c92d0d\u001b[39m\n",
|
||||
"Saving the dataset (1/1 shards): 100%|█| 4501/4501 [00:00<00:00, 63380.02 exampl\n",
|
||||
"Filter (num_proc=255): 100%|███████| 4275/4275 [00:01<00:00, 3385.29 examples/s]\n",
|
||||
"Filter (num_proc=226): 100%|██████████| 226/226 [00:01<00:00, 196.38 examples/s]\n",
|
||||
"Map (num_proc=255): 100%|██████████| 4275/4275 [00:02<00:00, 1480.29 examples/s]\n",
|
||||
"Map (num_proc=226): 100%|██████████████| 226/226 [00:05<00:00, 44.33 examples/s]\n",
|
||||
"[2023-08-24 20:19:33,527] [INFO] [axolotl.calculate_total_num_steps:346] [PID:125016] calculating total_num_tokens\u001b[39m\n",
|
||||
"[2023-08-24 20:19:33,536] [INFO] [axolotl.calculate_total_num_steps:353] [PID:125016] 📝 UPDATE CONFIG WITH: `total_num_tokens: 1514815`\u001b[39m\n",
|
||||
"[2023-08-24 20:19:33,552] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:19:33,590] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] 2ae1e19cb9bd6022bcc024ba552b1341f4c424a75595ff3419969cc2f838c2ba\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,094] [INFO] [axolotl.utils.dataloader.len_w_stats:293] [PID:125016] packing_efficiency_estimate: 1.0 actual packing efficiency: 0.9732312654194079\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,094] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 1.0 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,094] [INFO] [axolotl.calculate_total_num_steps:393] [PID:125016] data_loader_len: 182\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,094] [INFO] [axolotl.calculate_total_num_steps:402] [PID:125016] 📝 UPDATE CONFIG WITH: `sample_packing_eff_est: 0.98`\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,094] [INFO] [axolotl.calculate_total_num_steps:410] [PID:125016] total_num_steps: 227\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,094] [INFO] [axolotl.scripts.train:211] [PID:125016] loading model and (optionally) peft_config...\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,114] [INFO] [axolotl.load_model:106] [PID:125016] patching with flash attention\u001b[39m\n",
|
||||
"[2023-08-24 20:19:40,117] [INFO] [axolotl.load_model:147] [PID:125016] patching _expand_mask\u001b[39m\n",
|
||||
"Loading checkpoint shards: 100%|██████████████████| 2/2 [00:17<00:00, 8.60s/it]\n",
|
||||
"\u001b[33m[2023-08-24 20:19:58,136] [WARNING] [axolotl.load_model:337] [PID:125016] increasing model.config.max_position_embeddings to 4096\u001b[39m\n",
|
||||
"[2023-08-24 20:19:58,136] [INFO] [axolotl.load_model:343] [PID:125016] GPU memory usage after model load: 6.681GB (+0.364GB cache, +1.159GB misc)\u001b[39m\n",
|
||||
"[2023-08-24 20:19:58,136] [INFO] [axolotl.load_model:349] [PID:125016] converting PEFT model w/ prepare_model_for_kbit_training\u001b[39m\n",
|
||||
"[2023-08-24 20:19:58,146] [INFO] [axolotl.load_lora:473] [PID:125016] found linear modules: ['k_proj', 'q_proj', 'o_proj', 'up_proj', 'down_proj', 'gate_proj', 'v_proj']\u001b[39m\n",
|
||||
"trainable params: 79,953,920 || all params: 6,818,369,536 || trainable%: 1.172625208678628\n",
|
||||
"[2023-08-24 20:20:53,348] [INFO] [axolotl.load_model:394] [PID:125016] GPU memory usage after adapters: 6.830GB (+1.365GB cache, +1.159GB misc)\u001b[39m\n",
|
||||
"[2023-08-24 20:20:53,380] [INFO] [axolotl.scripts.train:267] [PID:125016] Compiling torch model\u001b[39m\n",
|
||||
"[2023-08-24 20:20:53,544] [INFO] [axolotl.scripts.train:272] [PID:125016] Pre-saving adapter config to ./models/run1\u001b[39m\n",
|
||||
"[2023-08-24 20:20:53,548] [INFO] [axolotl.scripts.train:288] [PID:125016] Starting trainer...\u001b[39m\n",
|
||||
"[2023-08-24 20:20:53,747] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 20:20:53,747] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mopenpipe\u001b[0m (\u001b[33mopenpipe-team\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Tracking run with wandb version 0.15.8\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Run data is saved locally in \u001b[35m\u001b[1m/workspace/OpenPipe/examples/classify-recipes/wandb/run-20230824_202055-run1\u001b[0m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Run \u001b[1m`wandb offline`\u001b[0m to turn off syncing.\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Syncing run \u001b[33mrun1\u001b[0m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: ⭐️ View project at \u001b[34m\u001b[4mhttps://wandb.ai/openpipe-team/classify-recipes\u001b[0m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: 🚀 View run at \u001b[34m\u001b[4mhttps://wandb.ai/openpipe-team/classify-recipes/runs/run1\u001b[0m\n",
|
||||
" 0%| | 0/230 [00:00<?, ?it/s][2023-08-24 20:20:56,099] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 20:20:56,099] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:20:56,102] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] ac74b20cc92d80020bfc11a9bed8f0bf75dfc745b23630320c27a53a549d7cae\u001b[39m\n",
|
||||
"[2023-08-24 20:20:56,106] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"{'loss': 1.7489, 'learning_rate': 2e-05, 'epoch': 0.02} \n",
|
||||
" 0%|▏ | 1/230 [00:19<1:13:24, 19.24s/it][2023-08-24 20:21:34,307] [INFO] [axolotl.callbacks.on_step_end:96] [PID:125016] GPU memory usage while training: 7.107GB (+10.436GB cache, +1.190GB misc)\u001b[39m\n",
|
||||
"{'loss': 1.7393, 'learning_rate': 4e-05, 'epoch': 0.04} \n",
|
||||
"{'loss': 1.7469, 'learning_rate': 6e-05, 'epoch': 0.06} \n",
|
||||
"{'loss': 1.7368, 'learning_rate': 8e-05, 'epoch': 0.09} \n",
|
||||
"{'loss': 1.6956, 'learning_rate': 0.0001, 'epoch': 0.11} \n",
|
||||
"{'loss': 1.6289, 'learning_rate': 0.00012, 'epoch': 0.13} \n",
|
||||
"{'loss': 1.4673, 'learning_rate': 0.00014, 'epoch': 0.15} \n",
|
||||
"{'loss': 1.2552, 'learning_rate': 0.00016, 'epoch': 0.17} \n",
|
||||
"{'loss': 0.9807, 'learning_rate': 0.00018, 'epoch': 0.19} \n",
|
||||
"{'loss': 0.7046, 'learning_rate': 0.0002, 'epoch': 0.22} \n",
|
||||
"{'loss': 0.4783, 'learning_rate': 0.00019998952044849376, 'epoch': 0.24} \n",
|
||||
"{'loss': 0.3099, 'learning_rate': 0.00019995808399039496, 'epoch': 0.26} \n",
|
||||
"{'loss': 0.2095, 'learning_rate': 0.00019990569721450326, 'epoch': 0.28} \n",
|
||||
"{'loss': 0.0851, 'learning_rate': 0.00019983237110061697, 'epoch': 0.3} \n",
|
||||
"{'loss': 0.0949, 'learning_rate': 0.00019973812101723188, 'epoch': 0.32} \n",
|
||||
"{'loss': 0.0496, 'learning_rate': 0.00019962296671832003, 'epoch': 0.35} \n",
|
||||
"{'loss': 0.0415, 'learning_rate': 0.00019948693233918952, 'epoch': 0.37} \n",
|
||||
"{'loss': 0.0405, 'learning_rate': 0.00019933004639142605, 'epoch': 0.39} \n",
|
||||
"{'loss': 0.0451, 'learning_rate': 0.000199152341756917, 'epoch': 0.41} \n",
|
||||
"{'loss': 0.0326, 'learning_rate': 0.00019895385568095982, 'epoch': 0.43} \n",
|
||||
" 9%|███▍ | 20/230 [06:15<1:05:47, 18.80s/it][2023-08-24 20:27:11,801] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:27:11,810] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:27:11,810] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 20:27:11,810] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 20:27:13,176] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:13,176] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:27:13,177] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 20:27:14,581] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:14,582] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.42it/s]\u001b[A[2023-08-24 20:27:16,012] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:16,013] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.01s/it]\u001b[A[2023-08-24 20:27:17,381] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:17,381] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.14s/it]\u001b[A[2023-08-24 20:27:18,789] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:18,789] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 20:27:20,178] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:20,178] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:07<00:02, 1.29s/it]\u001b[A[2023-08-24 20:27:21,602] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:21,602] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.33s/it]\u001b[A[2023-08-24 20:27:22,986] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:27:22,986] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.03450942039489746, 'eval_runtime': 11.2098, 'eval_samples_per_second': 20.161, 'eval_steps_per_second': 10.08, 'epoch': 0.43}\n",
|
||||
" 9%|███▍ | 20/230 [06:26<1:05:47, 18.80s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.35s/it]\u001b[A\n",
|
||||
"{'loss': 0.0336, 'learning_rate': 0.00019873462976445553, 'epoch': 0.45} \u001b[A\n",
|
||||
"{'loss': 0.0329, 'learning_rate': 0.00019849470995518992, 'epoch': 0.48} \n",
|
||||
"{'loss': 0.0317, 'learning_rate': 0.0001982341465382029, 'epoch': 0.5} \n",
|
||||
"{'loss': 0.0319, 'learning_rate': 0.00019795299412524945, 'epoch': 0.52} \n",
|
||||
"{'loss': 0.0258, 'learning_rate': 0.00019765131164335345, 'epoch': 0.54} \n",
|
||||
"{'loss': 0.024, 'learning_rate': 0.000197329162322457, 'epoch': 0.56} \n",
|
||||
"{'loss': 0.0251, 'learning_rate': 0.00019698661368216817, 'epoch': 0.58} \n",
|
||||
"{'loss': 0.025, 'learning_rate': 0.00019662373751760934, 'epoch': 0.61} \n",
|
||||
"{'loss': 0.0258, 'learning_rate': 0.00019624060988436966, 'epoch': 0.63} \n",
|
||||
"{'loss': 0.0225, 'learning_rate': 0.0001958373110825644, 'epoch': 0.65} \n",
|
||||
"{'loss': 0.0252, 'learning_rate': 0.00019541392564000488, 'epoch': 0.67} \n",
|
||||
"{'loss': 0.0233, 'learning_rate': 0.00019497054229448223, 'epoch': 0.69} \n",
|
||||
"{'loss': 0.0231, 'learning_rate': 0.0001945072539751685, 'epoch': 0.71} \n",
|
||||
"{'loss': 0.0208, 'learning_rate': 0.00019402415778313977, 'epoch': 0.74} \n",
|
||||
"{'loss': 0.0221, 'learning_rate': 0.00019352135497102463, 'epoch': 0.76} \n",
|
||||
"{'loss': 0.0251, 'learning_rate': 0.0001929989509217824, 'epoch': 0.78} \n",
|
||||
"{'loss': 0.0192, 'learning_rate': 0.0001924570551266159, 'epoch': 0.8} \n",
|
||||
"{'loss': 0.021, 'learning_rate': 0.00019189578116202307, 'epoch': 0.82} \n",
|
||||
"{'loss': 0.017, 'learning_rate': 0.00019131524666599233, 'epoch': 0.84} \n",
|
||||
"{'loss': 0.0235, 'learning_rate': 0.00019071557331334669, 'epoch': 0.86} \n",
|
||||
" 17%|███████▎ | 40/230 [12:42<59:13, 18.70s/it][2023-08-24 20:33:38,317] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:33:38,326] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:33:38,326] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 20:33:38,327] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 20:33:39,687] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:39,688] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:33:39,688] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 20:33:41,085] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:41,085] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 20:33:42,510] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:42,511] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 20:33:43,870] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:43,870] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 20:33:45,268] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:45,268] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 20:33:46,650] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:46,650] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 20:33:48,071] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:48,072] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 20:33:49,455] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:33:49,455] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.018499867990612984, 'eval_runtime': 11.1626, 'eval_samples_per_second': 20.246, 'eval_steps_per_second': 10.123, 'epoch': 0.86}\n",
|
||||
" 17%|███████▎ | 40/230 [12:53<59:13, 18.70s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.0207, 'learning_rate': 0.0001900968867902419, 'epoch': 0.89} \u001b[A\n",
|
||||
"{'loss': 0.0188, 'learning_rate': 0.00018945931676782373, 'epoch': 0.91} \n",
|
||||
"{'loss': 0.0169, 'learning_rate': 0.0001888029968750498, 'epoch': 0.93} \n",
|
||||
"{'loss': 0.0176, 'learning_rate': 0.00018812806467068268, 'epoch': 0.95} \n",
|
||||
"{'loss': 0.0162, 'learning_rate': 0.00018743466161445823, 'epoch': 0.97} \n",
|
||||
"{'loss': 0.0204, 'learning_rate': 0.00018672293303743738, 'epoch': 0.99} \n",
|
||||
" 20%|████████▍ | 46/230 [14:46<59:16, 19.33s/it][2023-08-24 20:35:47,036] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 20:35:47,036] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:35:47,038] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] 6076f9186c2a908489c30feee8b4739eb4ac652346e4a07ad9ea9efc2cefc22f\u001b[39m\n",
|
||||
"[2023-08-24 20:35:47,040] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"{'loss': 0.0158, 'learning_rate': 0.00018599302811154572, 'epoch': 1.02} \n",
|
||||
"{'loss': 0.0182, 'learning_rate': 0.00018524509981830852, 'epoch': 1.04} \n",
|
||||
"{'loss': 0.0185, 'learning_rate': 0.00018447930491678733, 'epoch': 1.06} \n",
|
||||
"{'loss': 0.0169, 'learning_rate': 0.00018369580391072433, 'epoch': 1.08} \n",
|
||||
"{'loss': 0.0167, 'learning_rate': 0.00018289476101490256, 'epoch': 1.1} \n",
|
||||
"{'loss': 0.0177, 'learning_rate': 0.00018207634412072764, 'epoch': 1.12} \n",
|
||||
"{'loss': 0.0196, 'learning_rate': 0.00018124072476103956, 'epoch': 1.15} \n",
|
||||
"{'loss': 0.0165, 'learning_rate': 0.00018038807807416068, 'epoch': 1.17} \n",
|
||||
"{'loss': 0.0148, 'learning_rate': 0.00017951858276718844, 'epoch': 1.19} \n",
|
||||
"{'loss': 0.0149, 'learning_rate': 0.00017863242107853995, 'epoch': 1.21} \n",
|
||||
"{'loss': 0.0161, 'learning_rate': 0.0001777297787397563, 'epoch': 1.23} \n",
|
||||
"{'loss': 0.0165, 'learning_rate': 0.00017681084493657525, 'epoch': 1.25} \n",
|
||||
"{'loss': 0.0169, 'learning_rate': 0.0001758758122692791, 'epoch': 1.28} \n",
|
||||
"{'loss': 0.0183, 'learning_rate': 0.00017492487671232784, 'epoch': 1.3} \n",
|
||||
" 26%|██████████▉ | 60/230 [19:07<52:41, 18.59s/it][2023-08-24 20:40:03,988] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:40:03,996] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:40:03,996] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 20:40:03,996] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 20:40:05,357] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:05,357] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:40:05,357] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 20:40:06,753] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:06,753] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 20:40:08,176] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:08,177] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 20:40:09,534] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:09,534] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 20:40:10,931] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:10,931] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 20:40:12,312] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:12,312] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 20:40:13,734] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:13,735] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 20:40:15,118] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:40:15,118] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.016062971204519272, 'eval_runtime': 11.1551, 'eval_samples_per_second': 20.26, 'eval_steps_per_second': 10.13, 'epoch': 1.3}\n",
|
||||
" 26%|██████████▉ | 60/230 [19:19<52:41, 18.59s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
" \u001b[A/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"{'loss': 0.0161, 'learning_rate': 0.00017395823757328444, 'epoch': 1.32} \n",
|
||||
"{'loss': 0.0153, 'learning_rate': 0.00017297609745104184, 'epoch': 1.34} \n",
|
||||
"{'loss': 0.018, 'learning_rate': 0.0001719786621933599, 'epoch': 1.36} \n",
|
||||
"{'loss': 0.0128, 'learning_rate': 0.00017096614085372185, 'epoch': 1.38} \n",
|
||||
"{'loss': 0.0156, 'learning_rate': 0.00016993874564751822, 'epoch': 1.41} \n",
|
||||
"{'loss': 0.0139, 'learning_rate': 0.00016889669190756868, 'epoch': 1.43} \n",
|
||||
"{'loss': 0.0205, 'learning_rate': 0.00016784019803899, 'epoch': 1.45} \n",
|
||||
"{'loss': 0.0151, 'learning_rate': 0.0001667694854734204, 'epoch': 1.47} \n",
|
||||
"{'loss': 0.0148, 'learning_rate': 0.0001656847786226095, 'epoch': 1.49} \n",
|
||||
"{'loss': 0.0142, 'learning_rate': 0.00016458630483138356, 'epoch': 1.51} \n",
|
||||
"{'loss': 0.0159, 'learning_rate': 0.00016347429432999602, 'epoch': 1.54} \n",
|
||||
"{'loss': 0.018, 'learning_rate': 0.00016234898018587337, 'epoch': 1.56} \n",
|
||||
"{'loss': 0.0149, 'learning_rate': 0.0001612105982547663, 'epoch': 1.58} \n",
|
||||
"{'loss': 0.0147, 'learning_rate': 0.00016005938713131642, 'epoch': 1.6} \n",
|
||||
"{'loss': 0.0138, 'learning_rate': 0.00015889558809904902, 'epoch': 1.62} \n",
|
||||
"{'loss': 0.0146, 'learning_rate': 0.00015771944507980207, 'epoch': 1.64} \n",
|
||||
"{'loss': 0.0156, 'learning_rate': 0.00015653120458260263, 'epoch': 1.66} \n",
|
||||
"{'loss': 0.0159, 'learning_rate': 0.00015533111565200044, 'epoch': 1.69} \n",
|
||||
"{'loss': 0.0152, 'learning_rate': 0.0001541194298158708, 'epoch': 1.71} \n",
|
||||
"{'loss': 0.0179, 'learning_rate': 0.00015289640103269625, 'epoch': 1.73} \n",
|
||||
" 35%|██████████████▌ | 80/230 [25:36<46:59, 18.80s/it][2023-08-24 20:46:32,302] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:46:32,310] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:46:32,311] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 20:46:32,311] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 20:46:33,670] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:33,670] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:46:33,670] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 20:46:35,068] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:35,068] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 20:46:36,491] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:36,491] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 20:46:37,849] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:37,849] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 20:46:39,247] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:39,247] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 20:46:40,629] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:40,629] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 20:46:42,051] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:42,051] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 20:46:43,434] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:46:43,435] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.015303226187825203, 'eval_runtime': 11.157, 'eval_samples_per_second': 20.256, 'eval_steps_per_second': 10.128, 'epoch': 1.73}\n",
|
||||
" 35%|██████████████▌ | 80/230 [25:47<46:59, 18.80s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.016, 'learning_rate': 0.00015166228563833934, 'epoch': 1.75} \u001b[A\n",
|
||||
"{'loss': 0.0149, 'learning_rate': 0.00015041734229231688, 'epoch': 1.77} \n",
|
||||
"{'loss': 0.0159, 'learning_rate': 0.00014916183192358718, 'epoch': 1.79} \n",
|
||||
"{'loss': 0.0181, 'learning_rate': 0.00014789601767586173, 'epoch': 1.82} \n",
|
||||
"{'loss': 0.0126, 'learning_rate': 0.00014662016485245274, 'epoch': 1.84} \n",
|
||||
"{'loss': 0.0195, 'learning_rate': 0.00014533454086066772, 'epoch': 1.86} \n",
|
||||
"{'loss': 0.0134, 'learning_rate': 0.00014403941515576344, 'epoch': 1.88} \n",
|
||||
"{'loss': 0.0131, 'learning_rate': 0.00014273505918447054, 'epoch': 1.9} \n",
|
||||
"{'loss': 0.0126, 'learning_rate': 0.00014142174632810072, 'epoch': 1.92} \n",
|
||||
"{'loss': 0.013, 'learning_rate': 0.0001400997518452484, 'epoch': 1.95} \n",
|
||||
"{'loss': 0.0177, 'learning_rate': 0.00013876935281409907, 'epoch': 1.97} \n",
|
||||
"{'loss': 0.0148, 'learning_rate': 0.00013743082807435615, 'epoch': 1.99} \n",
|
||||
" 40%|████████████████▊ | 92/230 [29:32<43:20, 18.85s/it][2023-08-24 20:50:38,268] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 20:50:38,269] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:50:38,271] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] 066d0afd59aa1c812e1335cead6076c131869d6997468e47f96e2f7244232bfe\u001b[39m\n",
|
||||
"[2023-08-24 20:50:38,273] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"{'loss': 0.0145, 'learning_rate': 0.00013608445816879866, 'epoch': 2.01} \n",
|
||||
"{'loss': 0.0151, 'learning_rate': 0.00013473052528448201, 'epoch': 2.03} \n",
|
||||
"{'loss': 0.0128, 'learning_rate': 0.00013336931319359426, 'epoch': 2.05} \n",
|
||||
"{'loss': 0.0163, 'learning_rate': 0.00013200110719397968, 'epoch': 2.08} \n",
|
||||
"{'loss': 0.016, 'learning_rate': 0.00013062619404934317, 'epoch': 2.1} \n",
|
||||
"{'loss': 0.015, 'learning_rate': 0.00012924486192914705, 'epoch': 2.12} \n",
|
||||
"{'loss': 0.0124, 'learning_rate': 0.00012785740034821329, 'epoch': 2.14} \n",
|
||||
"{'loss': 0.0134, 'learning_rate': 0.00012646410010604397, 'epoch': 2.16} \n",
|
||||
" 43%|█████████████████▊ | 100/230 [32:02<40:34, 18.73s/it][2023-08-24 20:52:58,789] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:52:58,797] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:52:58,797] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 20:52:58,798] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 20:53:00,162] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:00,163] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:53:00,163] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 20:53:01,560] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:01,561] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 20:53:02,985] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:02,985] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 20:53:04,343] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:04,344] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 20:53:05,742] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:05,742] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 20:53:07,122] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:07,123] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 20:53:08,545] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:08,545] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 20:53:09,929] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:53:09,929] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.01423712819814682, 'eval_runtime': 11.165, 'eval_samples_per_second': 20.242, 'eval_steps_per_second': 10.121, 'epoch': 2.16}\n",
|
||||
" 43%|█████████████████▊ | 100/230 [32:13<40:34, 18.73s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.0148, 'learning_rate': 0.00012506525322587207, 'epoch': 2.18} \u001b[A\n",
|
||||
"{'loss': 0.0163, 'learning_rate': 0.0001236611528934562, 'epoch': 2.21} \n",
|
||||
"{'loss': 0.0145, 'learning_rate': 0.00012225209339563145, 'epoch': 2.23} \n",
|
||||
"{'loss': 0.0166, 'learning_rate': 0.00012083837005862946, 'epoch': 2.25} \n",
|
||||
"{'loss': 0.0115, 'learning_rate': 0.00011942027918618074, 'epoch': 2.27} \n",
|
||||
"{'loss': 0.0107, 'learning_rate': 0.0001179981179974121, 'epoch': 2.29} \n",
|
||||
"{'loss': 0.012, 'learning_rate': 0.00011657218456455206, 'epoch': 2.31} \n",
|
||||
"{'loss': 0.0129, 'learning_rate': 0.00011514277775045768, 'epoch': 2.34} \n",
|
||||
"{'loss': 0.0118, 'learning_rate': 0.00011371019714597562, 'epoch': 2.36} \n",
|
||||
"{'loss': 0.0138, 'learning_rate': 0.00011227474300715055, 'epoch': 2.38} \n",
|
||||
"{'loss': 0.013, 'learning_rate': 0.00011083671619229408, 'epoch': 2.4} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 0.00010939641809892767, 'epoch': 2.42} \n",
|
||||
"{'loss': 0.0139, 'learning_rate': 0.00010795415060061243, 'epoch': 2.44} \n",
|
||||
"{'loss': 0.0159, 'learning_rate': 0.00010651021598367906, 'epoch': 2.46} \n",
|
||||
"{'loss': 0.0143, 'learning_rate': 0.00010506491688387127, 'epoch': 2.49} \n",
|
||||
"{'loss': 0.0154, 'learning_rate': 0.00010361855622291637, 'epoch': 2.51} \n",
|
||||
"{'loss': 0.0131, 'learning_rate': 0.00010217143714503508, 'epoch': 2.53} \n",
|
||||
"{'loss': 0.0124, 'learning_rate': 0.00010072386295340572, 'epoch': 2.55} \n",
|
||||
"{'loss': 0.0127, 'learning_rate': 9.927613704659429e-05, 'epoch': 2.57} \n",
|
||||
"{'loss': 0.0141, 'learning_rate': 9.782856285496495e-05, 'epoch': 2.59} \n",
|
||||
" 52%|█████████████████████▍ | 120/230 [38:28<34:16, 18.69s/it][2023-08-24 20:59:24,312] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:59:24,320] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 20:59:24,320] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 20:59:24,320] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 20:59:25,678] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:25,679] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 20:59:25,679] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 20:59:27,076] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:27,076] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 20:59:28,499] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:28,499] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 20:59:29,857] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:29,857] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 20:59:31,255] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:31,255] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 20:59:32,636] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:32,636] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 20:59:34,057] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:34,057] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 20:59:35,441] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 20:59:35,441] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.01440380234271288, 'eval_runtime': 11.1537, 'eval_samples_per_second': 20.262, 'eval_steps_per_second': 10.131, 'epoch': 2.59}\n",
|
||||
" 52%|█████████████████████▍ | 120/230 [38:39<34:16, 18.69s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
" \u001b[A/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"{'loss': 0.0133, 'learning_rate': 9.638144377708367e-05, 'epoch': 2.62} \n",
|
||||
"{'loss': 0.0151, 'learning_rate': 9.493508311612874e-05, 'epoch': 2.64} \n",
|
||||
"{'loss': 0.0136, 'learning_rate': 9.348978401632101e-05, 'epoch': 2.66} \n",
|
||||
"{'loss': 0.0129, 'learning_rate': 9.204584939938762e-05, 'epoch': 2.68} \n",
|
||||
"{'loss': 0.0141, 'learning_rate': 9.060358190107234e-05, 'epoch': 2.7} \n",
|
||||
"{'loss': 0.0131, 'learning_rate': 8.916328380770595e-05, 'epoch': 2.72} \n",
|
||||
"{'loss': 0.0154, 'learning_rate': 8.772525699284946e-05, 'epoch': 2.75} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 8.628980285402439e-05, 'epoch': 2.77} \n",
|
||||
"{'loss': 0.0104, 'learning_rate': 8.485722224954237e-05, 'epoch': 2.79} \n",
|
||||
"{'loss': 0.013, 'learning_rate': 8.342781543544798e-05, 'epoch': 2.81} \n",
|
||||
"{'loss': 0.0112, 'learning_rate': 8.200188200258791e-05, 'epoch': 2.83} \n",
|
||||
"{'loss': 0.0112, 'learning_rate': 8.057972081381927e-05, 'epoch': 2.85} \n",
|
||||
"{'loss': 0.0127, 'learning_rate': 7.916162994137056e-05, 'epoch': 2.88} \n",
|
||||
"{'loss': 0.0149, 'learning_rate': 7.774790660436858e-05, 'epoch': 2.9} \n",
|
||||
"{'loss': 0.0178, 'learning_rate': 7.633884710654383e-05, 'epoch': 2.92} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 7.493474677412794e-05, 'epoch': 2.94} \n",
|
||||
"{'loss': 0.0137, 'learning_rate': 7.353589989395604e-05, 'epoch': 2.96} \n",
|
||||
"{'loss': 0.0145, 'learning_rate': 7.214259965178674e-05, 'epoch': 2.98} \n",
|
||||
" 60%|████████████████████████▌ | 138/230 [44:18<28:53, 18.84s/it][2023-08-24 21:05:28,422] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 21:05:28,423] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:05:28,424] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] b60ca8a353f86b08d0005489b946fc3b062142d53d2ef59949adfba0b078763f\u001b[39m\n",
|
||||
"[2023-08-24 21:05:28,427] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"{'loss': 0.0133, 'learning_rate': 7.075513807085299e-05, 'epoch': 3.01} \n",
|
||||
"{'loss': 0.0128, 'learning_rate': 6.937380595065685e-05, 'epoch': 3.03} \n",
|
||||
" 61%|████████████████████████▉ | 140/230 [44:55<28:12, 18.81s/it][2023-08-24 21:05:51,811] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:05:51,819] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:05:51,820] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 21:05:51,820] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 21:05:53,181] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:05:53,181] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:05:53,181] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 21:05:54,578] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:05:54,578] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 21:05:56,002] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:05:56,002] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 21:05:57,363] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:05:57,363] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.14s/it]\u001b[A[2023-08-24 21:05:58,762] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:05:58,762] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 21:06:00,145] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:06:00,145] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 21:06:01,569] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:06:01,569] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.33s/it]\u001b[A[2023-08-24 21:06:02,956] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:06:02,956] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.01361126359552145, 'eval_runtime': 11.1696, 'eval_samples_per_second': 20.233, 'eval_steps_per_second': 10.117, 'epoch': 3.03}\n",
|
||||
" 61%|████████████████████████▉ | 140/230 [45:06<28:12, 18.81s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.0129, 'learning_rate': 6.799889280602031e-05, 'epoch': 3.05} \u001b[A\n",
|
||||
"{'loss': 0.0133, 'learning_rate': 6.663068680640574e-05, 'epoch': 3.07} \n",
|
||||
"{'loss': 0.0142, 'learning_rate': 6.526947471551798e-05, 'epoch': 3.09} \n",
|
||||
"{'loss': 0.0118, 'learning_rate': 6.391554183120138e-05, 'epoch': 3.11} \n",
|
||||
"{'loss': 0.0144, 'learning_rate': 6.25691719256439e-05, 'epoch': 3.14} \n",
|
||||
"{'loss': 0.0142, 'learning_rate': 6.123064718590099e-05, 'epoch': 3.16} \n",
|
||||
"{'loss': 0.013, 'learning_rate': 5.9900248154751616e-05, 'epoch': 3.18} \n",
|
||||
"{'loss': 0.0123, 'learning_rate': 5.857825367189931e-05, 'epoch': 3.2} \n",
|
||||
"{'loss': 0.0109, 'learning_rate': 5.7264940815529485e-05, 'epoch': 3.22} \n",
|
||||
"{'loss': 0.0107, 'learning_rate': 5.596058484423656e-05, 'epoch': 3.24} \n",
|
||||
"{'loss': 0.0104, 'learning_rate': 5.46654591393323e-05, 'epoch': 3.26} \n",
|
||||
"{'loss': 0.0139, 'learning_rate': 5.337983514754723e-05, 'epoch': 3.29} \n",
|
||||
"{'loss': 0.0138, 'learning_rate': 5.2103982324138244e-05, 'epoch': 3.31} \n",
|
||||
"{'loss': 0.0155, 'learning_rate': 5.083816807641284e-05, 'epoch': 3.33} \n",
|
||||
"{'loss': 0.0129, 'learning_rate': 4.958265770768316e-05, 'epoch': 3.35} \n",
|
||||
"{'loss': 0.0143, 'learning_rate': 4.833771436166069e-05, 'epoch': 3.37} \n",
|
||||
"{'loss': 0.0146, 'learning_rate': 4.710359896730379e-05, 'epoch': 3.39} \n",
|
||||
"{'loss': 0.0112, 'learning_rate': 4.5880570184129215e-05, 'epoch': 3.42} \n",
|
||||
"{'loss': 0.0105, 'learning_rate': 4.466888434799958e-05, 'epoch': 3.44} \n",
|
||||
"{'loss': 0.0121, 'learning_rate': 4.34687954173974e-05, 'epoch': 3.46} \n",
|
||||
" 70%|████████████████████████████▌ | 160/230 [51:23<21:52, 18.75s/it][2023-08-24 21:12:19,255] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:12:19,264] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:12:19,264] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 21:12:19,264] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 21:12:20,628] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:20,628] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:12:20,628] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 21:12:22,028] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:22,028] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 21:12:23,454] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:23,454] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 21:12:24,814] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:24,814] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.14s/it]\u001b[A[2023-08-24 21:12:26,214] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:26,215] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 21:12:27,598] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:27,598] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 21:12:29,021] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:29,021] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.33s/it]\u001b[A[2023-08-24 21:12:30,407] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:12:30,407] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.013194510713219643, 'eval_runtime': 11.1763, 'eval_samples_per_second': 20.221, 'eval_steps_per_second': 10.111, 'epoch': 3.46}\n",
|
||||
" 70%|████████████████████████████▌ | 160/230 [51:34<21:52, 18.75s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.0126, 'learning_rate': 4.2280554920197936e-05, 'epoch': 3.48} \u001b[A\n",
|
||||
"{'loss': 0.0139, 'learning_rate': 4.1104411900951015e-05, 'epoch': 3.5} \n",
|
||||
"{'loss': 0.0094, 'learning_rate': 3.994061286868361e-05, 'epoch': 3.52} \n",
|
||||
"{'loss': 0.0144, 'learning_rate': 3.878940174523371e-05, 'epoch': 3.55} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 3.7651019814126654e-05, 'epoch': 3.57} \n",
|
||||
"{'loss': 0.0128, 'learning_rate': 3.652570567000402e-05, 'epoch': 3.59} \n",
|
||||
"{'loss': 0.0141, 'learning_rate': 3.541369516861648e-05, 'epoch': 3.61} \n",
|
||||
"{'loss': 0.0118, 'learning_rate': 3.431522137739049e-05, 'epoch': 3.63} \n",
|
||||
"{'loss': 0.0142, 'learning_rate': 3.323051452657961e-05, 'epoch': 3.65} \n",
|
||||
"{'loss': 0.0149, 'learning_rate': 3.215980196101002e-05, 'epoch': 3.68} \n",
|
||||
"{'loss': 0.0106, 'learning_rate': 3.110330809243134e-05, 'epoch': 3.7} \n",
|
||||
"{'loss': 0.0112, 'learning_rate': 3.0061254352481804e-05, 'epoch': 3.72} \n",
|
||||
"{'loss': 0.0103, 'learning_rate': 2.9033859146278197e-05, 'epoch': 3.74} \n",
|
||||
"{'loss': 0.0133, 'learning_rate': 2.8021337806640135e-05, 'epoch': 3.76} \n",
|
||||
"{'loss': 0.0138, 'learning_rate': 2.702390254895819e-05, 'epoch': 3.78} \n",
|
||||
"{'loss': 0.0095, 'learning_rate': 2.6041762426715566e-05, 'epoch': 3.81} \n",
|
||||
"{'loss': 0.0152, 'learning_rate': 2.5075123287672175e-05, 'epoch': 3.83} \n",
|
||||
"{'loss': 0.0126, 'learning_rate': 2.4124187730720917e-05, 'epoch': 3.85} \n",
|
||||
"{'loss': 0.0106, 'learning_rate': 2.3189155063424782e-05, 'epoch': 3.87} \n",
|
||||
"{'loss': 0.0135, 'learning_rate': 2.2270221260243673e-05, 'epoch': 3.89} \n",
|
||||
" 78%|████████████████████████████████ | 180/230 [57:52<15:42, 18.85s/it][2023-08-24 21:18:48,116] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:18:48,124] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:18:48,124] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 21:18:48,125] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 21:18:49,486] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:49,486] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:18:49,486] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 21:18:50,888] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:50,888] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 21:18:52,314] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:52,314] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 21:18:53,675] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:53,676] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.14s/it]\u001b[A[2023-08-24 21:18:55,074] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:55,074] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 21:18:56,457] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:56,457] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 21:18:57,881] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:57,881] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.33s/it]\u001b[A[2023-08-24 21:18:59,267] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:18:59,267] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.013087373226881027, 'eval_runtime': 11.1759, 'eval_samples_per_second': 20.222, 'eval_steps_per_second': 10.111, 'epoch': 3.89}\n",
|
||||
" 78%|████████████████████████████████ | 180/230 [58:03<15:42, 18.85s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
" \u001b[A/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"{'loss': 0.0128, 'learning_rate': 2.1367578921460074e-05, 'epoch': 3.91} \n",
|
||||
"{'loss': 0.0103, 'learning_rate': 2.0481417232811573e-05, 'epoch': 3.94} \n",
|
||||
"{'loss': 0.0099, 'learning_rate': 1.961192192583934e-05, 'epoch': 3.96} \n",
|
||||
"{'loss': 0.0112, 'learning_rate': 1.8759275238960473e-05, 'epoch': 3.98} \n",
|
||||
"{'loss': 0.0132, 'learning_rate': 1.7923655879272393e-05, 'epoch': 4.0} \n",
|
||||
" 80%|████████████████████████████████▉ | 185/230 [59:38<14:45, 19.68s/it][2023-08-24 21:20:34,361] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"[2023-08-24 21:20:34,361] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:20:34,363] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] b61d2abf3bc15c84376d0af3386cd5fac907d76f1a3fd6fec08d54c6b52d49cb\u001b[39m\n",
|
||||
"[2023-08-24 21:20:34,365] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 1514815\u001b[39m\n",
|
||||
"{'loss': 0.0102, 'learning_rate': 1.7105238985097472e-05, 'epoch': 4.02} \n",
|
||||
"{'loss': 0.0113, 'learning_rate': 1.6304196089275658e-05, 'epoch': 4.04} \n",
|
||||
"{'loss': 0.0117, 'learning_rate': 1.5520695083212678e-05, 'epoch': 4.06} \n",
|
||||
"{'loss': 0.012, 'learning_rate': 1.4754900181691467e-05, 'epoch': 4.09} \n",
|
||||
"{'loss': 0.0144, 'learning_rate': 1.4006971888454323e-05, 'epoch': 4.11} \n",
|
||||
"{'loss': 0.0102, 'learning_rate': 1.3277066962562645e-05, 'epoch': 4.13} \n",
|
||||
"{'loss': 0.0132, 'learning_rate': 1.2565338385541792e-05, 'epoch': 4.15} \n",
|
||||
"{'loss': 0.013, 'learning_rate': 1.1871935329317363e-05, 'epoch': 4.17} \n",
|
||||
"{'loss': 0.0137, 'learning_rate': 1.1197003124950222e-05, 'epoch': 4.19} \n",
|
||||
"{'loss': 0.0114, 'learning_rate': 1.0540683232176307e-05, 'epoch': 4.22} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 9.903113209758096e-06, 'epoch': 4.24} \n",
|
||||
"{'loss': 0.0126, 'learning_rate': 9.284426686653303e-06, 'epoch': 4.26} \n",
|
||||
"{'loss': 0.0137, 'learning_rate': 8.68475333400769e-06, 'epoch': 4.28} \n",
|
||||
"{'loss': 0.0132, 'learning_rate': 8.10421883797694e-06, 'epoch': 4.3} \n",
|
||||
"{'loss': 0.0111, 'learning_rate': 7.542944873384106e-06, 'epoch': 4.32} \n",
|
||||
" 87%|█████████████████████████████████▉ | 200/230 [1:04:20<09:26, 18.88s/it][2023-08-24 21:25:17,008] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:25:17,016] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:25:17,016] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 21:25:17,017] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 21:25:18,378] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:18,378] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:25:18,378] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 21:25:19,776] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:19,776] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 21:25:21,199] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:21,199] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 21:25:22,559] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:22,559] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 21:25:23,958] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:23,958] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 21:25:25,341] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:25,341] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 21:25:26,764] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:26,764] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 21:25:28,152] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:25:28,152] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.013082703575491905, 'eval_runtime': 11.1685, 'eval_samples_per_second': 20.235, 'eval_steps_per_second': 10.118, 'epoch': 4.32}\n",
|
||||
" 87%|█████████████████████████████████▉ | 200/230 [1:04:32<09:26, 18.88s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.0109, 'learning_rate': 7.0010490782176145e-06, 'epoch': 4.35} \u001b[A\n",
|
||||
"{'loss': 0.0151, 'learning_rate': 6.4786450289753715e-06, 'epoch': 4.37} \n",
|
||||
"{'loss': 0.016, 'learning_rate': 5.975842216860239e-06, 'epoch': 4.39} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 5.492746024831541e-06, 'epoch': 4.41} \n",
|
||||
"{'loss': 0.0132, 'learning_rate': 5.029457705517793e-06, 'epoch': 4.43} \n",
|
||||
"{'loss': 0.0109, 'learning_rate': 4.586074359995119e-06, 'epoch': 4.45} \n",
|
||||
"{'loss': 0.01, 'learning_rate': 4.162688917435631e-06, 'epoch': 4.48} \n",
|
||||
"{'loss': 0.0125, 'learning_rate': 3.7593901156303566e-06, 'epoch': 4.5} \n",
|
||||
"{'loss': 0.0137, 'learning_rate': 3.3762624823906573e-06, 'epoch': 4.52} \n",
|
||||
"{'loss': 0.0122, 'learning_rate': 3.0133863178318232e-06, 'epoch': 4.54} \n",
|
||||
"{'loss': 0.0125, 'learning_rate': 2.6708376775430033e-06, 'epoch': 4.56} \n",
|
||||
"{'loss': 0.0132, 'learning_rate': 2.3486883566465777e-06, 'epoch': 4.58} \n",
|
||||
"{'loss': 0.0118, 'learning_rate': 2.0470058747505516e-06, 'epoch': 4.61} \n",
|
||||
"{'loss': 0.0124, 'learning_rate': 1.7658534617971067e-06, 'epoch': 4.63} \n",
|
||||
"{'loss': 0.0121, 'learning_rate': 1.5052900448100815e-06, 'epoch': 4.65} \n",
|
||||
"{'loss': 0.0145, 'learning_rate': 1.2653702355444608e-06, 'epoch': 4.67} \n",
|
||||
"{'loss': 0.0133, 'learning_rate': 1.0461443190402099e-06, 'epoch': 4.69} \n",
|
||||
"{'loss': 0.0132, 'learning_rate': 8.476582430830049e-07, 'epoch': 4.71} \n",
|
||||
"{'loss': 0.0106, 'learning_rate': 6.699536085739588e-07, 'epoch': 4.74} \n",
|
||||
"{'loss': 0.0106, 'learning_rate': 5.130676608104845e-07, 'epoch': 4.76} \n",
|
||||
" 96%|█████████████████████████████████████▎ | 220/230 [1:10:47<03:08, 18.85s/it][2023-08-24 21:31:44,045] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:31:44,053] [INFO] [axolotl.utils.dataloader.generate_batches:181] [PID:125016] generating packed batches\u001b[39m\n",
|
||||
"[2023-08-24 21:31:44,053] [INFO] [axolotl.utils.dataloader.generate_batches:187] [PID:125016] c0ef04db402ba917eb072daff58b8c0ef38c662600f92eee3292e60918d59b78\u001b[39m\n",
|
||||
"[2023-08-24 21:31:44,054] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:322: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
|
||||
"[2023-08-24 21:31:45,414] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:45,415] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"[2023-08-24 21:31:45,415] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A[2023-08-24 21:31:46,811] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:46,811] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 25%|███████████▎ | 2/8 [00:01<00:04, 1.43it/s]\u001b[A[2023-08-24 21:31:48,235] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:48,235] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 38%|████████████████▉ | 3/8 [00:02<00:05, 1.00s/it]\u001b[A[2023-08-24 21:31:49,595] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:49,596] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 50%|██████████████████████▌ | 4/8 [00:04<00:04, 1.13s/it]\u001b[A[2023-08-24 21:31:50,994] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:50,994] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 62%|████████████████████████████▏ | 5/8 [00:05<00:03, 1.23s/it]\u001b[A[2023-08-24 21:31:52,376] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:52,377] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 75%|█████████████████████████████████▊ | 6/8 [00:06<00:02, 1.28s/it]\u001b[A[2023-08-24 21:31:53,798] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:53,799] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" 88%|███████████████████████████████████████▍ | 7/8 [00:08<00:01, 1.32s/it]\u001b[A[2023-08-24 21:31:55,183] [INFO] [accelerate.accelerator.log:60] [PID:125016] The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.\n",
|
||||
"[2023-08-24 21:31:55,183] [INFO] [axolotl.utils.dataloader._len_est:262] [PID:125016] packing_efficiency_estimate: 0.98 total_num_tokens per device: 79978\u001b[39m\n",
|
||||
"\n",
|
||||
" \u001b[A\n",
|
||||
"\u001b[A{'eval_loss': 0.01321522518992424, 'eval_runtime': 11.1631, 'eval_samples_per_second': 20.245, 'eval_steps_per_second': 10.123, 'epoch': 4.76}\n",
|
||||
" 96%|█████████████████████████████████████▎ | 220/230 [1:10:59<03:08, 18.85s/it]\n",
|
||||
"100%|█████████████████████████████████████████████| 8/8 [00:09<00:00, 1.34s/it]\u001b[A\n",
|
||||
"{'loss': 0.013, 'learning_rate': 3.7703328167999485e-07, 'epoch': 4.78} \u001b[A\n",
|
||||
"{'loss': 0.0115, 'learning_rate': 2.6187898276813784e-07, 'epoch': 4.8} \n",
|
||||
"{'loss': 0.0091, 'learning_rate': 1.6762889938303217e-07, 'epoch': 4.82} \n",
|
||||
"{'loss': 0.0112, 'learning_rate': 9.430278549675819e-08, 'epoch': 4.84} \n",
|
||||
"{'loss': 0.0111, 'learning_rate': 4.191600960505859e-08, 'epoch': 4.86} \n",
|
||||
"{'loss': 0.0109, 'learning_rate': 1.0479551506259456e-08, 'epoch': 4.89} \n",
|
||||
"{'loss': 0.0148, 'learning_rate': 0.0, 'epoch': 4.91} \n",
|
||||
"{'loss': 0.0152, 'learning_rate': 1.0479551506270558e-08, 'epoch': 4.93} \n",
|
||||
"{'loss': 0.0119, 'learning_rate': 4.191600960505859e-08, 'epoch': 4.95} \n",
|
||||
"{'loss': 0.0092, 'learning_rate': 9.430278549675819e-08, 'epoch': 4.97} \n",
|
||||
"{'train_runtime': 4450.2443, 'train_samples_per_second': 4.803, 'train_steps_per_second': 0.052, 'train_loss': 0.08349336267084531, 'epoch': 4.97}\n",
|
||||
"100%|███████████████████████████████████████| 230/230 [1:14:07<00:00, 19.34s/it]\n",
|
||||
"[2023-08-24 21:35:04,013] [INFO] [axolotl.scripts.train:303] [PID:125016] Training Completed!!! Saving pre-trained model to ./models/run1\u001b[39m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Waiting for W&B process to finish... \u001b[32m(success).\u001b[0m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: \n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Run history:\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/loss █▃▂▂▁▁▁▁▁▁▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/runtime █▂▁▁▂▁▃▄▄▃▂\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/samples_per_second ▁▇██▇█▆▅▅▆▇\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/steps_per_second ▁▇██▇█▆▅▅▆▇\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/global_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/learning_rate ▂▅██████▇▇▇▇▇▆▆▆▆▅▅▅▅▄▄▄▃▃▃▃▂▂▂▂▂▁▁▁▁▁▁▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/loss ██▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/total_flos ▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_loss ▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_runtime ▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_samples_per_second ▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_steps_per_second ▁\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: \n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Run summary:\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/loss 0.01322\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/runtime 11.1631\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/samples_per_second 20.245\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: eval/steps_per_second 10.123\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/epoch 4.97\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/global_step 230\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/learning_rate 0.0\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/loss 0.0092\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/total_flos 2.966052920056873e+17\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_loss 0.08349\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_runtime 4450.2443\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_samples_per_second 4.803\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: train/train_steps_per_second 0.052\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: \n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: 🚀 View run \u001b[33mrun1\u001b[0m at: \u001b[34m\u001b[4mhttps://wandb.ai/openpipe-team/classify-recipes/runs/run1\u001b[0m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: ️⚡ View job at \u001b[34m\u001b[4mhttps://wandb.ai/openpipe-team/classify-recipes/jobs/QXJ0aWZhY3RDb2xsZWN0aW9uOjkyNjYwODUw/version_details/v0\u001b[0m\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Synced 5 W&B file(s), 0 media file(s), 2 artifact file(s) and 0 other file(s)\n",
|
||||
"\u001b[34m\u001b[1mwandb\u001b[0m: Find logs at: \u001b[35m\u001b[1m./wandb/run-20230824_202055-run1/logs\u001b[0m\n",
|
||||
"Exception in thread NetStatThr:\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\n",
|
||||
" self.run()\n",
|
||||
" File \"/usr/lib/python3.10/threading.py\", line 953, in run\n",
|
||||
" self._target(*self._args, **self._kwargs)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/wandb_run.py\", line 256, in check_network_status\n",
|
||||
" self._loop_check_status(\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/wandb_run.py\", line 212, in _loop_check_status\n",
|
||||
" local_handle = request()\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/interface/interface.py\", line 864, in deliver_network_status\n",
|
||||
" return self._deliver_network_status(status)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/interface/interface_shared.py\", line 610, in _deliver_network_status\n",
|
||||
" return self._deliver_record(record)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/interface/interface_shared.py\", line 569, in _deliver_record\n",
|
||||
" handle = mailbox._deliver_record(record, interface=self)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/lib/mailbox.py\", line 455, in _deliver_record\n",
|
||||
" interface._publish(record)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/interface/interface_sock.py\", line 51, in _publish\n",
|
||||
" self._sock_client.send_record_publish(record)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/lib/sock_client.py\", line 221, in send_record_publish\n",
|
||||
" self.send_server_request(server_req)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/lib/sock_client.py\", line 155, in send_server_request\n",
|
||||
" self._send_message(msg)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/lib/sock_client.py\", line 152, in _send_message\n",
|
||||
" self._sendall_with_error_handle(header + data)\n",
|
||||
" File \"/usr/local/lib/python3.10/dist-packages/wandb/sdk/lib/sock_client.py\", line 130, in _sendall_with_error_handle\n",
|
||||
" sent = self._sock.send(data)\n",
|
||||
"BrokenPipeError: [Errno 32] Broken pipe\n",
|
||||
"\u001b[0m"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!accelerate launch ./axolotl/scripts/finetune.py training-config.yaml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Sweet! I now have a new directory `./models/run1`. This contains my trained model, which I can use to classify more recipes.\n",
|
||||
"\n",
|
||||
"There's one more step though. I trained our model using [LoRA](https://huggingface.co/docs/peft/conceptual_guides/lora), which is a memory-efficient training method. But the inference library we'll use for testing doesn't support LoRA models directly yet, so we need to \"merge\" our LoRA model to transform it into a standard Llama2-shaped model. I've defined a small helper to do that called `merge_lora_model` that I'll use below.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Merging model (this could take a while)\n",
|
||||
"Loading base model\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "13b36646399a45eab184327f17165046",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Loading PEFT model\n",
|
||||
"Running merge_and_unload\n",
|
||||
"Model saved to ./models/run1/merged\n",
|
||||
"Final model saved to './models/run1/merged'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from utils import merge_lora_model\n",
|
||||
"\n",
|
||||
"print(\"Merging model (this could take a while)\")\n",
|
||||
"final_model_dir = merge_lora_model(\"training-config.yaml\")\n",
|
||||
"print(f\"Final model saved to '{final_model_dir}'\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Ok, I have a model, but is it actually any good? I'll run some evaluations in [./evaluate.ipynb](./evaluate.ipynb) to check.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
905
examples/classify-recipes/3-evaluate.ipynb
Normal file
@@ -0,0 +1,905 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"I have a model in `./models/run1/merged` that was trained on GPT-4's outputs to classify recipes. I need to figure out whether it does a good job at classifying recipes. I'll install dependencies first.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture\n",
|
||||
"%pip install vllm==0.1.3 pandas==2.0.3 joblib==1.3.2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Remember I got a \"test.jsonl\" file from OpenPipe back in [./prepare.ipynb](./prepare.ipynb)? That's data from our dataset that we didn't use in training, so we can use it to check our model's performance.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"test_data = pd.read_json(\"./data/test.jsonl\", lines=True)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"During the training process Axolotl transformed our data into an instruction/response format known as the \"Alpaca format\" based on [the project that introduced it](https://github.com/tatsu-lab/stanford_alpaca). I need to transform my test data into the same format for best results.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Sample prompt:\n",
|
||||
"--------------\n",
|
||||
"### Instruction:\n",
|
||||
"[{\"role\":\"system\",\"content\":\"Your goal is to classify a recipe along several dimensions.Pay attention to the instructions.\"},{\"role\":\"user\",\"content\":\"Pan Gravy\\n\\nIngredients:\\n- 1/3 cup all purpose flour\\n- 1/3 cup turkey drippings\\n- 3 cup water or broth\\n- 1/8 to 1/4 teaspoon salt\\n- 1/8 tsp pepper\\n\\nDirections:\\n- In a skillet or roasting pan, add flour to drippings; blend well.\\n- Cook over medium heat 2 to 3 minutes until smooth and light brown, stirring constantly.\\n- Add water; cook until mixture boils and thickens, stirring constantly.\\n- Stir in salt and pepper.\\n- *Flour and drippings can be decreased to 1/4 cup each for thinner gravy.\\n- *\"}]\n",
|
||||
"\n",
|
||||
"### Response:\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from axolotl.prompters import UnpromptedPrompter\n",
|
||||
"\n",
|
||||
"prompter = UnpromptedPrompter()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_prompt(input: str) -> str:\n",
|
||||
" return next(prompter.build_prompt(input))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompts = test_data[\"instruction\"].apply(format_prompt)\n",
|
||||
"\n",
|
||||
"print(f\"Sample prompt:\\n--------------\\n{prompts[0]}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next up, I'll use [vLLM](https://vllm.readthedocs.io/en/latest/) to efficiently process all the prompts in our test data with our own model.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO 08-28 00:26:23 llm_engine.py:70] Initializing an LLM engine with config: model='./models/run1/merged', tokenizer='./models/run1/merged', tokenizer_mode=auto, trust_remote_code=False, dtype=torch.float16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=1, seed=0)\n",
|
||||
"INFO 08-28 00:27:26 llm_engine.py:196] # GPU blocks: 3419, # CPU blocks: 512\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 500/500 [00:37<00:00, 13.34it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Sample output:\n",
|
||||
"--------------\n",
|
||||
"{\"role\":\"assistant\",\"content\":null,\"function_call\":{\"name\":\"classify\",\"arguments\":\"{\\n\\\"has_non_fish_meat\\\": true,\\n\\\"requires_oven\\\": false,\\n\\\"requires_stove\\\": true,\\n\\\"cook_time_over_30_mins\\\": false,\\n\\\"main_dish\\\": false\\n}\"}}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from vllm import LLM, SamplingParams\n",
|
||||
"\n",
|
||||
"llm = LLM(model=\"./models/run1/merged\", max_num_batched_tokens=4096)\n",
|
||||
"\n",
|
||||
"sampling_params = SamplingParams(\n",
|
||||
" # 120 should be fine for the work we're doing here.\n",
|
||||
" max_tokens=120,\n",
|
||||
" # This is a deterministic task so temperature=0 is best.\n",
|
||||
" temperature=0,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"my_outputs = llm.generate(prompts, sampling_params=sampling_params)\n",
|
||||
"my_outputs = [o.outputs[0].text for o in my_outputs]\n",
|
||||
"\n",
|
||||
"test_data[\"my_outputs\"] = my_outputs\n",
|
||||
"\n",
|
||||
"print(f\"Sample output:\\n--------------\\n{my_outputs[0]}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Ok, we have our outputs! There are 5 categories we classify each recipe on, so let's check what percentage of the time our model's output matches GPT-4's. I'll write a quick eval function for that:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Overall accuracy: 0.95\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def parse_fn_call(str):\n",
|
||||
" \"\"\"Parse the function call arguments from the response\"\"\"\n",
|
||||
" response_dict = json.loads(str)\n",
|
||||
" args_dict = json.loads(response_dict[\"function_call\"][\"arguments\"])\n",
|
||||
"\n",
|
||||
" return args_dict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"test_data[\"output_parsed\"] = test_data[\"output\"].apply(parse_fn_call)\n",
|
||||
"test_data[\"my_outputs_parsed\"] = test_data[\"my_outputs\"].apply(parse_fn_call)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def calculate_accuracy(row, labels_col):\n",
|
||||
" \"\"\"Calculate the fraction of my model's outputs that match the reference outputs\"\"\"\n",
|
||||
" true_outputs = row[\"output_parsed\"]\n",
|
||||
" labels_outputs = row[labels_col]\n",
|
||||
"\n",
|
||||
" # print(f\"true_outputs: {true_outputs}\")\n",
|
||||
" # print(f\"my_outputs: {row[labels_col]}\")\n",
|
||||
"\n",
|
||||
" num_matching_outputs = 0\n",
|
||||
" for key in true_outputs.keys():\n",
|
||||
" if key in labels_outputs and true_outputs[key] == labels_outputs[key]:\n",
|
||||
" num_matching_outputs += 1\n",
|
||||
"\n",
|
||||
" return num_matching_outputs / len(true_outputs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"test_data[\"accuracy\"] = test_data.apply(\n",
|
||||
" calculate_accuracy, axis=1, labels_col=\"my_outputs_parsed\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(f\"Overall accuracy: {test_data['accuracy'].mean():.2f}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"95% seems good! However, we don't have much to compare it to. Let's see how GPT-3.5 would do on the same task as a baseline. We'll use the same prompt we used with GPT-4 to generate the labels.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Sample recipe:\n",
|
||||
"--------------\n",
|
||||
"Pan Gravy\n",
|
||||
"\n",
|
||||
"Ingredients:\n",
|
||||
"- 1/3 cup all purpose flour\n",
|
||||
"- 1/3 cup turkey drippings\n",
|
||||
"- 3 cup water or broth\n",
|
||||
"- 1/8 to 1/4 teaspoon salt\n",
|
||||
"- 1/8 tsp pepper\n",
|
||||
"\n",
|
||||
"Directions:\n",
|
||||
"- In a skillet or roasting pan, add flour to drippings; blend well.\n",
|
||||
"- Cook over medium heat 2 to 3 minutes until smooth and light brown, stirring constantly.\n",
|
||||
"- Add water; cook until mixture boils and thickens, stirring constantly.\n",
|
||||
"- Stir in salt and pepper.\n",
|
||||
"- *Flour and drippings can be decreased to 1/4 cup each for thinner gravy.\n",
|
||||
"- *\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def extract_recipe(row):\n",
|
||||
" \"\"\"Extract the recipe from the instruction\"\"\"\n",
|
||||
" return json.loads(row[\"instruction\"])[1][\"content\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"recipes = test_data.apply(extract_recipe, axis=1)\n",
|
||||
"print(f\"Sample recipe:\\n--------------\\n{recipes[0]}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Classifying first recipe:\n",
|
||||
"------------------\n",
|
||||
"{'has_non_fish_meat': False, 'requires_oven': False, 'requires_stove': True, 'cook_time_over_30_mins': False, 'main_dish': False}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import joblib\n",
|
||||
"import openai\n",
|
||||
"import os\n",
|
||||
"import dotenv\n",
|
||||
"\n",
|
||||
"dotenv.load_dotenv()\n",
|
||||
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
|
||||
"\n",
|
||||
"memory = joblib.Memory(\"./cache\", verbose=0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@memory.cache\n",
|
||||
"def classify_recipe_35(recipe: str):\n",
|
||||
" completion = openai.ChatCompletion.create(\n",
|
||||
" model=\"gpt-3.5-turbo\",\n",
|
||||
" messages=[\n",
|
||||
" {\n",
|
||||
" \"role\": \"system\",\n",
|
||||
" \"content\": \"Your goal is to classify a recipe along several dimensions.Pay attention to the instructions.\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": recipe,\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" functions=[\n",
|
||||
" {\n",
|
||||
" \"name\": \"classify\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"has_non_fish_meat\": {\n",
|
||||
" \"type\": \"boolean\",\n",
|
||||
" \"description\": \"True if the recipe contains any meat or meat products (eg. chicken broth) besides fish\",\n",
|
||||
" },\n",
|
||||
" \"requires_oven\": {\n",
|
||||
" \"type\": \"boolean\",\n",
|
||||
" \"description\": \"True if the recipe requires an oven\",\n",
|
||||
" },\n",
|
||||
" \"requires_stove\": {\n",
|
||||
" \"type\": \"boolean\",\n",
|
||||
" \"description\": \"True if the recipe requires a stove\",\n",
|
||||
" },\n",
|
||||
" \"cook_time_over_30_mins\": {\n",
|
||||
" \"type\": \"boolean\",\n",
|
||||
" \"description\": \"True if the recipe takes over 30 minutes to prepare and cook, including waiting time\",\n",
|
||||
" },\n",
|
||||
" \"main_dish\": {\n",
|
||||
" \"type\": \"boolean\",\n",
|
||||
" \"description\": \"True if the recipe can be served as a main dish\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\n",
|
||||
" \"has_non_fish_meat\",\n",
|
||||
" \"requires_oven\",\n",
|
||||
" \"requires_stove\",\n",
|
||||
" \"cook_time_over_30_mins\",\n",
|
||||
" \"main_dish\",\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" function_call={\n",
|
||||
" \"name\": \"classify\",\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
" return json.loads(completion.choices[0].message.function_call.arguments)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(\"Classifying first recipe:\\n------------------\")\n",
|
||||
"print(classify_recipe_35(recipes[0]))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"\u001b[A\n",
|
||||
"100%|██████████| 500/500 [00:31<00:00, 15.77it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from tqdm import tqdm\n",
|
||||
"\n",
|
||||
"test_data[\"gpt_3.5\"] = [classify_recipe_35(r) for r in tqdm(recipes)]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"GPT-3.5 accuracy: 0.91\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"test_data[\"gpt_3.5_accuracy\"] = test_data.apply(\n",
|
||||
" calculate_accuracy, axis=1, labels_col=\"gpt_3.5\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(f\"GPT-3.5 accuracy: {test_data['gpt_3.5_accuracy'].mean():.2f}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And for completeness, let's try a fine-tuned GPT-3.5 model. You can find the fine-tuning code in [finetune-gpt-3.5.ipynb](./finetune-gpt-3.5.ipynb)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'has_non_fish_meat': True,\n",
|
||||
" 'requires_oven': False,\n",
|
||||
" 'requires_stove': True,\n",
|
||||
" 'cook_time_over_30_mins': False,\n",
|
||||
" 'main_dish': False}"
|
||||
]
|
||||
},
|
||||
"execution_count": 34,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@memory.cache\n",
|
||||
"def classify_recipe_35_ft(recipe: str):\n",
|
||||
" completion = openai.ChatCompletion.create(\n",
|
||||
" model=\"ft:gpt-3.5-turbo-0613:openpipe::7rZpPqYn\",\n",
|
||||
" messages=[\n",
|
||||
" {\n",
|
||||
" \"role\": \"system\",\n",
|
||||
" \"content\": \"Your goal is to classify a recipe along several \"\n",
|
||||
" \"dimensions.Pay attention to the instructions.\",\n",
|
||||
" },\n",
|
||||
" {\"role\": \"user\", \"content\": recipe},\n",
|
||||
" ],\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" return json.loads(completion.choices[0].message.content)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"classify_recipe_35_ft(recipes[0])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 500/500 [07:31<00:00, 1.11it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"test_data[\"gpt_3.5_ft\"] = [classify_recipe_35_ft(r) for r in tqdm(recipes)]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"GPT-3.5 FT accuracy: 0.94\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"test_data[\"gpt_3.5_ft_accuracy\"] = test_data.apply(\n",
|
||||
" calculate_accuracy, axis=1, labels_col=\"gpt_3.5_ft\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(f\"GPT-3.5 FT accuracy: {test_data['gpt_3.5_ft_accuracy'].mean():.2f}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Not bad! However, there are still a few rows where the model outputs don't match. Let's take a closer look.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Alligator Sauce Piquant\n",
|
||||
"\n",
|
||||
"Ingredients:\n",
|
||||
"- 2 lb. alligator, boneless and cubed *\n",
|
||||
"- 4 onions, diced\n",
|
||||
"- 1 c. parsley, chopped\n",
|
||||
"- 4 stalks celery, chopped\n",
|
||||
"- 1 bell pepper, diced\n",
|
||||
"- 1 c. catsup\n",
|
||||
"- 2 Tbsp. Heinz steak sauce\n",
|
||||
"- 2 Tbsp. soy sauce\n",
|
||||
"- 2 Tbsp. Louisiana hot sauce\n",
|
||||
"- 2 Tbsp. cornstarch\n",
|
||||
"- 1 tsp. salt\n",
|
||||
"- 2 tsp. red pepper (ground)\n",
|
||||
"- 1/4 c. cooking oil\n",
|
||||
"\n",
|
||||
"Directions:\n",
|
||||
"- *Alligator must be free of all fat; also dark meat is the best (leg and body meat), boneless.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>GPT-4</th>\n",
|
||||
" <th>My model</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>cook_time_over_30_mins</th>\n",
|
||||
" <td>True</td>\n",
|
||||
" <td>False</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>main_dish</th>\n",
|
||||
" <td>True</td>\n",
|
||||
" <td>False</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" GPT-4 My model\n",
|
||||
"cook_time_over_30_mins True False\n",
|
||||
"main_dish True False"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Veggie Casserole\n",
|
||||
"\n",
|
||||
"Ingredients:\n",
|
||||
"- 1 (8 oz.) bag mixed veggies (corn, peas, carrots, green beans), steamed\n",
|
||||
"- 1 c. celery\n",
|
||||
"- 1 c. onions\n",
|
||||
"- 1 c. Cheddar cheese\n",
|
||||
"- 1 c. mayonnaise\n",
|
||||
"\n",
|
||||
"Directions:\n",
|
||||
"- Mix above ingredients.\n",
|
||||
"- Bake at 350° for 30 minutes, until bubbly.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>GPT-4</th>\n",
|
||||
" <th>My model</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>main_dish</th>\n",
|
||||
" <td>False</td>\n",
|
||||
" <td>True</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" GPT-4 My model\n",
|
||||
"main_dish False True"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Rhonda'S Butter Chess Pie\n",
|
||||
"\n",
|
||||
"Ingredients:\n",
|
||||
"- 5 eggs\n",
|
||||
"- 1 stick melted butter\n",
|
||||
"- 2 c. sugar\n",
|
||||
"- 1 tsp. vanilla\n",
|
||||
"- 1 Tbsp. cornstarch\n",
|
||||
"- 1/2 c. buttermilk\n",
|
||||
"- unbaked 9-inch deep dish pie shell\n",
|
||||
"\n",
|
||||
"Directions:\n",
|
||||
"- Mix eggs with sugar and cornstarch until smooth.\n",
|
||||
"- Add melted butter, vanilla and buttermilk.\n",
|
||||
"- Bake at 350° for 30 minutes or until done.\n",
|
||||
"- Let cool and chill.\n",
|
||||
"- Similar to Furr's Butter Chess Pie.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>GPT-4</th>\n",
|
||||
" <th>My model</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>cook_time_over_30_mins</th>\n",
|
||||
" <td>False</td>\n",
|
||||
" <td>True</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" GPT-4 My model\n",
|
||||
"cook_time_over_30_mins False True"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Broccoli Gorgonzola Cream Soup\n",
|
||||
"\n",
|
||||
"Ingredients:\n",
|
||||
"- 2 heads Broccoli\n",
|
||||
"- 700 milliliters Water\n",
|
||||
"- 1 Onion, Peeled And Cut Into Chunks\n",
|
||||
"- 1 pinch Salt\n",
|
||||
"- 1 teaspoon Oregano\n",
|
||||
"- 1 Potato, Peeled And Cut Into Chunks\n",
|
||||
"- 200 grams Crumbled Gorgonzola\n",
|
||||
"- 1 Tablespoon Finely Grated Parmesan\n",
|
||||
"\n",
|
||||
"Directions:\n",
|
||||
"- Cut off the hard trunks of the broccoli and cut it into small pieces. Prepare a pot with water, add broccoli, onion, salt and oregano and boil for about 30 minutes.\n",
|
||||
"- Add the peeled potato and boil for another 20 minutes. When vegetables are cooked, strain and save the stock.\n",
|
||||
"- Using a hand blender, puree vegetables, adding as much stock as desired. Bring soup back to heat over low heat, and sir in gorgonzola. Remove from heat and add Parmesan.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>GPT-4</th>\n",
|
||||
" <th>My model</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>main_dish</th>\n",
|
||||
" <td>False</td>\n",
|
||||
" <td>True</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" GPT-4 My model\n",
|
||||
"main_dish False True"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Wild Rice With Cucumber And Feta\n",
|
||||
"\n",
|
||||
"Ingredients:\n",
|
||||
"- 1 (8.5-ounce) package precooked wild rice (such as Archer Farms)\n",
|
||||
"- 1 cup diced English cucumber\n",
|
||||
"- 1 1/2 tablespoons olive oil\n",
|
||||
"- 1 tablespoon fresh lemon juice\n",
|
||||
"- 2 ounces crumbled feta cheese\n",
|
||||
"- 1/2 teaspoon pepper\n",
|
||||
"- 1/4 teaspoon salt\n",
|
||||
"\n",
|
||||
"Directions:\n",
|
||||
"- Prepare rice according to the package directions.\n",
|
||||
"- Combine cooked rice, cucumber, olive oil, lemon juice, and crumbled feta cheese in a medium bowl; toss to coat. Stir in pepper and salt.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>GPT-4</th>\n",
|
||||
" <th>My model</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>main_dish</th>\n",
|
||||
" <td>True</td>\n",
|
||||
" <td>False</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" GPT-4 My model\n",
|
||||
"main_dish True False"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"np.random.seed(42)\n",
|
||||
"\n",
|
||||
"for row in test_data[test_data.accuracy < 1].sample(5).itertuples():\n",
|
||||
" print(json.loads(row.instruction)[1][\"content\"])\n",
|
||||
"\n",
|
||||
" gpt4_output = parse_fn_call(row.output)\n",
|
||||
" my_output = parse_fn_call(row.my_outputs)\n",
|
||||
"\n",
|
||||
" table = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" \"GPT-4\": gpt4_output,\n",
|
||||
" \"My model\": my_output,\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" table = table[table[\"GPT-4\"] != table[\"My model\"]]\n",
|
||||
" display(table)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the outputs, it's clear that our model still makes some mistakes. But at the same time, there are plenty of examples like \"Rhonda's Butter Chess Pie\" where our model gets it right, even though GPT-4 got it wrong! And there are also cases like the \"Veggie Casserole\", where the \"right\" answer is truly ambiguous and really both answers are defensible.\n"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Interested in cost/latency benchmarking? You can check out [./benchmarking.ipynb](./benchmarking.ipynb) for an overview of my findings!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
349
examples/classify-recipes/4-benchmark.ipynb
Normal file
@@ -0,0 +1,349 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"I'm pretty happy with my model's accuracy relative to GPT-4. How does it compare cost-wise?\n",
|
||||
"\n",
|
||||
"I'll really push this to its limits -- let's see how quickly our poor model can classify the [full 2-million-recipe dataset](https://huggingface.co/datasets/corbt/all-recipes) 😈.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture\n",
|
||||
"%pip install datasets==2.14.4 vllm==0.1.3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of recipes: 2,147,248\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from datasets import load_dataset\n",
|
||||
"\n",
|
||||
"all_recipes = load_dataset(\"corbt/all-recipes\")[\"train\"][\"input\"]\n",
|
||||
"\n",
|
||||
"print(f\"Number of recipes: {len(all_recipes):,}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO 08-24 19:38:29 llm_engine.py:70] Initializing an LLM engine with config: model='./models/run1/merged', tokenizer='./models/run1/merged', tokenizer_mode=auto, trust_remote_code=False, dtype=torch.float16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=1, seed=0)\n",
|
||||
"INFO 08-24 19:39:48 llm_engine.py:196] # GPU blocks: 3419, # CPU blocks: 512\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from vllm import LLM, SamplingParams\n",
|
||||
"\n",
|
||||
"llm = LLM(model=\"./models/run1/merged\", max_num_batched_tokens=4096)\n",
|
||||
"\n",
|
||||
"sampling_params = SamplingParams(\n",
|
||||
" # 120 should be fine for the work we're doing here.\n",
|
||||
" max_tokens=120,\n",
|
||||
" # This is a deterministic task so temperature=0 is best.\n",
|
||||
" temperature=0,\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Start time: 1692906050.3340027\n",
|
||||
"Processing recipes 0 to 10,000...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 10000/10000 [04:51<00:00, 34.30it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processing recipes 10,000 to 20,000...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 10000/10000 [04:54<00:00, 33.98it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processing recipes 20,000 to 30,000...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 10000/10000 [04:53<00:00, 34.11it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processing recipes 30,000 to 40,000...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 10000/10000 [04:53<00:00, 34.11it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processing recipes 40,000 to 50,000...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 48%|████▊ | 4796/10000 [02:21<03:18, 26.22it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "KeyboardInterrupt",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[6], line 12\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[39mfor\u001b[39;00m i \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(\u001b[39m0\u001b[39m, \u001b[39mlen\u001b[39m(all_recipes), BATCH_SIZE):\n\u001b[1;32m 11\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mProcessing recipes \u001b[39m\u001b[39m{\u001b[39;00mi\u001b[39m:\u001b[39;00m\u001b[39m,\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m to \u001b[39m\u001b[39m{\u001b[39;00mi\u001b[39m+\u001b[39mBATCH_SIZE\u001b[39m:\u001b[39;00m\u001b[39m,\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m...\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m---> 12\u001b[0m outputs \u001b[39m=\u001b[39m llm\u001b[39m.\u001b[39;49mgenerate(all_recipes[i:i\u001b[39m+\u001b[39;49mBATCH_SIZE], sampling_params\u001b[39m=\u001b[39;49msampling_params)\n\u001b[1;32m 14\u001b[0m all_outputs\u001b[39m.\u001b[39mextend([o\u001b[39m.\u001b[39moutputs[\u001b[39m0\u001b[39m]\u001b[39m.\u001b[39mtext \u001b[39mfor\u001b[39;00m o \u001b[39min\u001b[39;00m outputs])\n\u001b[1;32m 16\u001b[0m end_time \u001b[39m=\u001b[39m time\u001b[39m.\u001b[39mtime()\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/entrypoints/llm.py:130\u001b[0m, in \u001b[0;36mLLM.generate\u001b[0;34m(self, prompts, sampling_params, prompt_token_ids, use_tqdm)\u001b[0m\n\u001b[1;32m 128\u001b[0m token_ids \u001b[39m=\u001b[39m prompt_token_ids[i]\n\u001b[1;32m 129\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_add_request(prompt, sampling_params, token_ids)\n\u001b[0;32m--> 130\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run_engine(use_tqdm)\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/entrypoints/llm.py:150\u001b[0m, in \u001b[0;36mLLM._run_engine\u001b[0;34m(self, use_tqdm)\u001b[0m\n\u001b[1;32m 148\u001b[0m outputs: List[RequestOutput] \u001b[39m=\u001b[39m []\n\u001b[1;32m 149\u001b[0m \u001b[39mwhile\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mllm_engine\u001b[39m.\u001b[39mhas_unfinished_requests():\n\u001b[0;32m--> 150\u001b[0m step_outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mllm_engine\u001b[39m.\u001b[39;49mstep()\n\u001b[1;32m 151\u001b[0m \u001b[39mfor\u001b[39;00m output \u001b[39min\u001b[39;00m step_outputs:\n\u001b[1;32m 152\u001b[0m \u001b[39mif\u001b[39;00m output\u001b[39m.\u001b[39mfinished:\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/engine/llm_engine.py:313\u001b[0m, in \u001b[0;36mLLMEngine.step\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 307\u001b[0m \u001b[39mreturn\u001b[39;00m [\n\u001b[1;32m 308\u001b[0m RequestOutput\u001b[39m.\u001b[39mfrom_seq_group(seq_group)\n\u001b[1;32m 309\u001b[0m \u001b[39mfor\u001b[39;00m seq_group \u001b[39min\u001b[39;00m scheduler_outputs\u001b[39m.\u001b[39mignored_seq_groups\n\u001b[1;32m 310\u001b[0m ]\n\u001b[1;32m 312\u001b[0m \u001b[39m# Execute the model.\u001b[39;00m\n\u001b[0;32m--> 313\u001b[0m output \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run_workers(\n\u001b[1;32m 314\u001b[0m \u001b[39m\"\u001b[39;49m\u001b[39mexecute_model\u001b[39;49m\u001b[39m\"\u001b[39;49m,\n\u001b[1;32m 315\u001b[0m seq_group_metadata_list\u001b[39m=\u001b[39;49mseq_group_metadata_list,\n\u001b[1;32m 316\u001b[0m blocks_to_swap_in\u001b[39m=\u001b[39;49mscheduler_outputs\u001b[39m.\u001b[39;49mblocks_to_swap_in,\n\u001b[1;32m 317\u001b[0m blocks_to_swap_out\u001b[39m=\u001b[39;49mscheduler_outputs\u001b[39m.\u001b[39;49mblocks_to_swap_out,\n\u001b[1;32m 318\u001b[0m blocks_to_copy\u001b[39m=\u001b[39;49mscheduler_outputs\u001b[39m.\u001b[39;49mblocks_to_copy,\n\u001b[1;32m 319\u001b[0m )\n\u001b[1;32m 320\u001b[0m \u001b[39m# Update the scheduler with the model outputs.\u001b[39;00m\n\u001b[1;32m 321\u001b[0m seq_groups \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mscheduler\u001b[39m.\u001b[39mupdate(output)\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/engine/llm_engine.py:470\u001b[0m, in \u001b[0;36mLLMEngine._run_workers\u001b[0;34m(self, method, get_all_outputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m 467\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m 468\u001b[0m executor \u001b[39m=\u001b[39m \u001b[39mgetattr\u001b[39m(worker, method)\n\u001b[0;32m--> 470\u001b[0m output \u001b[39m=\u001b[39m executor(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 471\u001b[0m all_outputs\u001b[39m.\u001b[39mappend(output)\n\u001b[1;32m 473\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mparallel_config\u001b[39m.\u001b[39mworker_use_ray:\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py:115\u001b[0m, in \u001b[0;36mcontext_decorator.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[39m@functools\u001b[39m\u001b[39m.\u001b[39mwraps(func)\n\u001b[1;32m 113\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdecorate_context\u001b[39m(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n\u001b[1;32m 114\u001b[0m \u001b[39mwith\u001b[39;00m ctx_factory():\n\u001b[0;32m--> 115\u001b[0m \u001b[39mreturn\u001b[39;00m func(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/worker/worker.py:293\u001b[0m, in \u001b[0;36mWorker.execute_model\u001b[0;34m(self, seq_group_metadata_list, blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)\u001b[0m\n\u001b[1;32m 289\u001b[0m input_tokens, input_positions, input_metadata \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_prepare_inputs(\n\u001b[1;32m 290\u001b[0m seq_group_metadata_list)\n\u001b[1;32m 292\u001b[0m \u001b[39m# Execute the model.\u001b[39;00m\n\u001b[0;32m--> 293\u001b[0m output \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmodel(\n\u001b[1;32m 294\u001b[0m input_ids\u001b[39m=\u001b[39;49minput_tokens,\n\u001b[1;32m 295\u001b[0m positions\u001b[39m=\u001b[39;49minput_positions,\n\u001b[1;32m 296\u001b[0m kv_caches\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mgpu_cache,\n\u001b[1;32m 297\u001b[0m input_metadata\u001b[39m=\u001b[39;49minput_metadata,\n\u001b[1;32m 298\u001b[0m cache_events\u001b[39m=\u001b[39;49mcache_events,\n\u001b[1;32m 299\u001b[0m )\n\u001b[1;32m 300\u001b[0m \u001b[39mreturn\u001b[39;00m output\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1502\u001b[0m \u001b[39m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[39m=\u001b[39m [], []\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/model_executor/models/llama.py:255\u001b[0m, in \u001b[0;36mLlamaForCausalLM.forward\u001b[0;34m(self, input_ids, positions, kv_caches, input_metadata, cache_events)\u001b[0m\n\u001b[1;32m 245\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mforward\u001b[39m(\n\u001b[1;32m 246\u001b[0m \u001b[39mself\u001b[39m,\n\u001b[1;32m 247\u001b[0m input_ids: torch\u001b[39m.\u001b[39mTensor,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 251\u001b[0m cache_events: Optional[List[torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39mEvent]],\n\u001b[1;32m 252\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Dict[\u001b[39mint\u001b[39m, SequenceOutputs]:\n\u001b[1;32m 253\u001b[0m hidden_states \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel(input_ids, positions, kv_caches,\n\u001b[1;32m 254\u001b[0m input_metadata, cache_events)\n\u001b[0;32m--> 255\u001b[0m next_tokens \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49msampler(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlm_head\u001b[39m.\u001b[39;49mweight, hidden_states,\n\u001b[1;32m 256\u001b[0m input_metadata)\n\u001b[1;32m 257\u001b[0m \u001b[39mreturn\u001b[39;00m next_tokens\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1502\u001b[0m \u001b[39m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[39m=\u001b[39m [], []\n",
|
||||
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/vllm/model_executor/layers/sampler.py:44\u001b[0m, in \u001b[0;36mSampler.forward\u001b[0;34m(self, embedding, hidden_states, input_metadata, embedding_bias)\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mforward\u001b[39m(\n\u001b[1;32m 37\u001b[0m \u001b[39mself\u001b[39m,\n\u001b[1;32m 38\u001b[0m embedding: torch\u001b[39m.\u001b[39mTensor,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 42\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Dict[\u001b[39mint\u001b[39m, SequenceOutputs]:\n\u001b[1;32m 43\u001b[0m \u001b[39m# Get the hidden states that we use for sampling.\u001b[39;00m\n\u001b[0;32m---> 44\u001b[0m hidden_states \u001b[39m=\u001b[39m _prune_hidden_states(hidden_states, input_metadata)\n\u001b[1;32m 46\u001b[0m \u001b[39m# Get the logits for the next tokens.\u001b[39;00m\n\u001b[1;32m 47\u001b[0m logits \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mmatmul(hidden_states, embedding\u001b[39m.\u001b[39mt())\n",
|
||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We'll process our recipes in batches of 10,000.\n",
|
||||
"\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"BATCH_SIZE = 10000\n",
|
||||
"all_outputs = []\n",
|
||||
"\n",
|
||||
"start_time = time.time()\n",
|
||||
"print(f\"Start time: {start_time}\")\n",
|
||||
"for i in range(0, len(all_recipes), BATCH_SIZE):\n",
|
||||
" print(f\"Processing recipes {i:,} to {i+BATCH_SIZE:,}...\")\n",
|
||||
" outputs = llm.generate(\n",
|
||||
" all_recipes[i : i + BATCH_SIZE], sampling_params=sampling_params\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" all_outputs.extend([o.outputs[0].text for o in outputs])\n",
|
||||
"\n",
|
||||
"end_time = time.time()\n",
|
||||
"print(f\"End time: {end_time}\")\n",
|
||||
"print(f\"Total hours: {((end_time - start_time) / 3600):.2f}\")\n",
|
||||
"\n",
|
||||
"# Ended up running this in a separate script to leave it running in the background.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Nice! I've processed all 2,147,248 recipes in under 17 hours. Let's do a cost comparison with GPT-3.5 and GPT-4. I'll use the GPT-4 latency/cost numbers based on the 5000 samples used to generate our model's training data.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>Model</th>\n",
|
||||
" <th>Cost to Classify One Recipe</th>\n",
|
||||
" <th>Cost to Classify Entire Dataset</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>Llama2 7B (FT)</td>\n",
|
||||
" <td>0.000009</td>\n",
|
||||
" <td>18.81</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
" <td>GPT-3.5</td>\n",
|
||||
" <td>0.000481</td>\n",
|
||||
" <td>1033.26</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
" <td>GPT-3.5 (FT)</td>\n",
|
||||
" <td>0.004044</td>\n",
|
||||
" <td>8683.47</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
" <td>GPT-4</td>\n",
|
||||
" <td>0.010800</td>\n",
|
||||
" <td>23190.28</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" Model Cost to Classify One Recipe \\\n",
|
||||
"0 Llama2 7B (FT) 0.000009 \n",
|
||||
"1 GPT-3.5 0.000481 \n",
|
||||
"2 GPT-3.5 (FT) 0.004044 \n",
|
||||
"3 GPT-4 0.010800 \n",
|
||||
"\n",
|
||||
" Cost to Classify Entire Dataset \n",
|
||||
"0 18.81 \n",
|
||||
"1 1033.26 \n",
|
||||
"2 8683.47 \n",
|
||||
"3 23190.28 "
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# I used an on-demand Nvidia L40 on RunPod for this, at an hourly cost of $1.14.\n",
|
||||
"finetuned_hourly_cost = 1.14\n",
|
||||
"\n",
|
||||
"finetuned_total_hours = 16.5\n",
|
||||
"\n",
|
||||
"finetuned_avg_cost = finetuned_hourly_cost * finetuned_total_hours / len(all_recipes)\n",
|
||||
"\n",
|
||||
"# The average input and output tokens for OpenAI, based on the 5000 recipes I\n",
|
||||
"# sent them when generating training data.\n",
|
||||
"avg_input_tokens = 276\n",
|
||||
"avg_output_tokens = 42\n",
|
||||
"\n",
|
||||
"# Token pricing from https://openai.com/pricing\n",
|
||||
"gpt_4_avg_cost = avg_input_tokens * 0.03 / 1000 + avg_output_tokens * 0.06 / 1000\n",
|
||||
"\n",
|
||||
"gpt_35_avg_cost = avg_input_tokens * 0.0015 / 1000 + avg_output_tokens * 0.0016 / 1000\n",
|
||||
"\n",
|
||||
"gpt_35_finetuned_avg_cost = (\n",
|
||||
" avg_input_tokens * 0.012 / 1000 + avg_output_tokens * 0.016 / 1000 + 0.06 / 1000\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"models = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" \"Model\": [\n",
|
||||
" \"Llama2 7B (FT)\",\n",
|
||||
" \"GPT-3.5\",\n",
|
||||
" \"GPT-3.5 (FT)\",\n",
|
||||
" \"GPT-4\",\n",
|
||||
" ],\n",
|
||||
" \"Cost to Classify One Recipe\": [\n",
|
||||
" finetuned_avg_cost,\n",
|
||||
" gpt_35_avg_cost,\n",
|
||||
" gpt_35_finetuned_avg_cost,\n",
|
||||
" gpt_4_avg_cost,\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"models[\"Cost to Classify Entire Dataset\"] = (\n",
|
||||
" models[\"Cost to Classify One Recipe\"] * len(all_recipes)\n",
|
||||
").round(2)\n",
|
||||
"\n",
|
||||
"models\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
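A quick sanity check of the cost arithmetic in the cell above (not part of the diff; it just re-derives two rows of the table from the same figures):

```python
# Re-derive the Llama2 7B (FT) and GPT-4 rows of the cost table above.
n_recipes = 2_147_248

llama_cost = 1.14 * 16.5 / n_recipes  # $1.14/hr L40 for 16.5 hours
gpt4_cost = 276 * 0.03 / 1000 + 42 * 0.06 / 1000  # avg tokens x per-1K pricing

print(f"{llama_cost:.6f} -> ${llama_cost * n_recipes:.2f} total")  # 0.000009 -> $18.81
print(f"{gpt4_cost:.6f} -> ${gpt4_cost * n_recipes:.2f} total")    # 0.010800 -> $23190.28
```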
10
examples/classify-recipes/README.md
Normal file
@@ -0,0 +1,10 @@
# OpenPipe demo: fine-tuning your own model

Hi there! This directory should give you a brief overview of how to fine-tune a competitive model from start to finish. You should review the notebooks here in the following order:

1. [./generate-data.ipynb](./generate-data.ipynb): Demonstrates how to generate a sample dataset of GPT-4 completions, store it using OpenPipe, and then export it in a format suitable for training a model.
2. [./train.ipynb](./train.ipynb): Trains a Llama 2 7B model on the dataset from step (1).
3. [./evaluate.ipynb](./evaluate.ipynb): Evaluates the model we trained using a held-out test set that we set aside in step (1).
4. [./benchmark.ipynb](./benchmark.ipynb): A script to compare costs and completion latencies across our fine-tuned model, GPT-3.5, and GPT-4.

If you want to follow along yourself, I recommend using [RunPod](https://www.runpod.io/). The training scripts we use will run on any of their GPUs with 24GB of vRAM or more.
0
examples/classify-recipes/__init__.py
Normal file
207
examples/classify-recipes/finetune-gpt-3.5.ipynb
Normal file
@@ -0,0 +1,207 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sample training data:\n",
"{'messages': [{'content': 'Your goal is to classify a recipe along several '\n",
"                          'dimensions.Pay attention to the instructions.',\n",
"               'role': 'system'},\n",
"              {'content': 'Homemade Salad Dressing\\n'\n",
"                          '\\n'\n",
"                          'Ingredients:\\n'\n",
"                          \"- 1 pt. Hellmann's mayonnaise\\n\"\n",
"                          '- 1 pt. buttermilk\\n'\n",
"                          '- 1 tsp. Accent\\n'\n",
"                          '- 2 Tbsp. dry parsley\\n'\n",
"                          '- 2 pkg. low-calorie Italian salad dressing mix\\n'\n",
"                          '- 1 can jalapeno peppers or 4 oz. Jimenez green '\n",
"                          'sauce\\n'\n",
"                          '\\n'\n",
"                          'Directions:\\n'\n",
"                          '- Blend well in blender; store in refrigerator.\\n'\n",
"                          '- For dip, decrease liquid.',\n",
"               'role': 'user'},\n",
"              {'content': '{\\n'\n",
"                          '\"has_non_fish_meat\": false,\\n'\n",
"                          '\"requires_oven\": false,\\n'\n",
"                          '\"requires_stove\": false,\\n'\n",
"                          '\"cook_time_over_30_mins\": false,\\n'\n",
"                          '\"main_dish\": false\\n'\n",
"                          '}',\n",
"               'role': 'assistant'}]}\n"
]
}
],
"source": [
"import pandas as pd\n",
"from pprint import pprint\n",
"import json\n",
"\n",
"df = pd.read_json(\"data/train.jsonl\", lines=True)\n",
"\n",
"training_data = []\n",
"for row in df.itertuples():\n",
"    input = json.loads(row.instruction)\n",
"    output = json.loads(row.output)\n",
"\n",
"    output[\"content\"] = output[\"function_call\"][\"arguments\"]\n",
"    del output[\"function_call\"]\n",
"\n",
"    sample = {\"messages\": input.copy() + [output]}\n",
"    training_data.append(sample)\n",
"\n",
"# Save the training data to data/train-gpt3.5.jsonl\n",
"\n",
"with open(\"data/train-gpt3.5.jsonl\", \"w\") as f:\n",
"    for sample in training_data:\n",
"        f.write(json.dumps(sample) + \"\\n\")\n",
"\n",
"print(\"Sample training data:\")\n",
"pprint(training_data[0])\n"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<File file id=file-faAdQ1KPxZH79ThW4Dbu4z1y at 0x7fa55db5c6d0> JSON: {\n",
"  \"object\": \"file\",\n",
"  \"id\": \"file-faAdQ1KPxZH79ThW4Dbu4z1y\",\n",
"  \"purpose\": \"fine-tune\",\n",
"  \"filename\": \"recipe-classification\",\n",
"  \"bytes\": 4210831,\n",
"  \"created_at\": 1693000959,\n",
"  \"status\": \"uploaded\",\n",
"  \"status_details\": null\n",
"}"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import os\n",
"import openai\n",
"\n",
"import dotenv\n",
"\n",
"dotenv.load_dotenv()\n",
"\n",
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"openai.File.create(\n",
"    file=open(\"data/train-gpt3.5.jsonl\", \"rb\"),\n",
"    purpose=\"fine-tune\",\n",
"    user_provided_filename=\"recipe-classification\",\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<OpenAIObject list at 0x7fa55dbf6930> JSON: {\n",
"  \"object\": \"list\",\n",
"  \"data\": [\n",
"    {\n",
"      \"object\": \"file\",\n",
"      \"id\": \"file-faAdQ1KPxZH79ThW4Dbu4z1y\",\n",
"      \"purpose\": \"fine-tune\",\n",
"      \"filename\": \"recipe-classification\",\n",
"      \"bytes\": 4210831,\n",
"      \"created_at\": 1693000959,\n",
"      \"status\": \"processed\",\n",
"      \"status_details\": null\n",
"    }\n",
"  ]\n",
"}"
]
},
"execution_count": 40,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"openai.File.list()\n"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<FineTuningJob fine_tuning.job id=ftjob-EjjLxmj9P8apwPRk5s2NPSeB at 0x7fa55ddc4360> JSON: {\n",
"  \"object\": \"fine_tuning.job\",\n",
"  \"id\": \"ftjob-EjjLxmj9P8apwPRk5s2NPSeB\",\n",
"  \"model\": \"gpt-3.5-turbo-0613\",\n",
"  \"created_at\": 1693001190,\n",
"  \"finished_at\": null,\n",
"  \"fine_tuned_model\": null,\n",
"  \"organization_id\": \"org-jRz4nVPMoeGHWL5nVR3Mb0kp\",\n",
"  \"result_files\": [],\n",
"  \"status\": \"created\",\n",
"  \"validation_file\": null,\n",
"  \"training_file\": \"file-faAdQ1KPxZH79ThW4Dbu4z1y\",\n",
"  \"hyperparameters\": {\n",
"    \"n_epochs\": 3\n",
"  },\n",
"  \"trained_tokens\": null\n",
"}"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"openai.FineTuningJob.create(\n",
"    training_file=\"file-faAdQ1KPxZH79ThW4Dbu4z1y\", model=\"gpt-3.5-turbo\"\n",
")\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
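The notebook ends with the job in status `created`. Not part of the diff, but a plausible follow-up cell for watching the job with the same pre-1.0 `openai` client; the job id is the one from the output above:

```python
import openai

# Poll the fine-tuning job created above.
job = openai.FineTuningJob.retrieve("ftjob-EjjLxmj9P8apwPRk5s2NPSeB")
print(job["status"])  # e.g. "running", eventually "succeeded"

# Recent events are useful for watching training progress.
events = openai.FineTuningJob.list_events(id="ftjob-EjjLxmj9P8apwPRk5s2NPSeB", limit=5)
for event in events["data"]:
    print(event["message"])
```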
73
examples/classify-recipes/training-config.yaml
Normal file
@@ -0,0 +1,73 @@
# This file is used by the training script in train.ipynb. You can read more about
# the format and see more examples at https://github.com/OpenAccess-AI-Collective/axolotl.
# One of the parameters you might want to play around with is `num_epochs`: if you have a
# smaller dataset, increasing it can give good results.

base_model: meta-llama/Llama-2-7b-hf
base_model_config: meta-llama/Llama-2-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_llama_derived_model: true

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: ./data/train.jsonl
    type: alpaca_instruct.load_no_prompt
dataset_prepared_path: ./data/last_run_prepared
val_set_size: 0.05
output_dir: ./models/run1

sequence_len: 4096
sample_packing: true

adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

# This will report stats from your training run to https://wandb.ai/. If you don't want to create a wandb account you can comment this section out.
wandb_project: classify-recipes
wandb_entity:
wandb_watch:
wandb_run_id: run1
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 5
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
eval_steps: 20
save_steps: 60
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
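A small sanity check (not part of the diff) of how two of the values above interact: with `micro_batch_size: 2` and `gradient_accumulation_steps: 4`, each optimizer step sees an effective batch of 8 packed sequences per GPU.

```python
import yaml

# Compute the effective batch size implied by the config above.
with open("training-config.yaml") as f:
    config = yaml.safe_load(f)

effective_batch = config["micro_batch_size"] * config["gradient_accumulation_steps"]
print(effective_batch)  # 2 * 4 = 8 sequences per optimizer step (per GPU)
```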
37
examples/classify-recipes/utils.py
Normal file
@@ -0,0 +1,37 @@
import os

import torch
import yaml
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer


def merge_lora_model(config_file: str):
    """Merge the LoRA adapter from a training run back into its base model.

    Reads the axolotl training config to locate the base model and the adapter's
    output directory, then saves the merged fp16 model and tokenizer to
    `<output_dir>/merged`.
    """
    with open(config_file, "r") as f:
        config = yaml.safe_load(f)

    base_model = config["base_model"]
    lora_model = config["output_dir"]
    merged_model = f"{lora_model}/merged"

    if os.path.exists(merged_model):
        print(f"Model {merged_model} already exists, skipping")
        return merged_model

    print("Loading base model")
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        return_dict=True,
        torch_dtype=torch.float16,
    )

    print("Loading PEFT model")
    model = PeftModel.from_pretrained(model, lora_model)
    print("Running merge_and_unload")
    model = model.merge_and_unload()

    tokenizer = AutoTokenizer.from_pretrained(base_model)

    model.save_pretrained(merged_model)
    tokenizer.save_pretrained(merged_model)
    print(f"Model saved to {merged_model}")

    return merged_model
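A minimal usage sketch (hypothetical; train.ipynb, which presumably calls this helper, is not part of this diff):

```python
# Hypothetical usage of the helper above.
from utils import merge_lora_model

merged_path = merge_lora_model("training-config.yaml")
print(merged_path)  # ./models/run1/merged, per output_dir in the config
```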
210
pnpm-lock.yaml
generated
@@ -80,6 +80,9 @@ importers:
      '@vercel/og':
        specifier: ^0.5.9
        version: 0.5.9
      archiver:
        specifier: ^6.0.0
        version: 6.0.0
      ast-types:
        specifier: ^0.14.2
        version: 0.14.2
@@ -116,6 +119,9 @@ importers:
      graphile-worker:
        specifier: ^0.13.0
        version: 0.13.0
      human-id:
        specifier: ^4.0.0
        version: 4.0.0
      immer:
        specifier: ^10.0.2
        version: 10.0.2
@@ -230,6 +236,9 @@ importers:
      socket.io-client:
        specifier: ^4.7.1
        version: 4.7.1
      stream-buffers:
        specifier: ^3.0.2
        version: 3.0.2
      superjson:
        specifier: 1.12.2
        version: 1.12.2
@@ -261,6 +270,9 @@ importers:
      '@openapi-contrib/openapi-schema-to-json-schema':
        specifier: ^4.0.5
        version: 4.0.5
      '@types/archiver':
        specifier: ^5.3.2
        version: 5.3.2
      '@types/babel__core':
        specifier: ^7.20.1
        version: 7.20.1
@@ -309,6 +321,9 @@ importers:
      '@types/react-syntax-highlighter':
        specifier: ^15.5.7
        version: 15.5.7
      '@types/stream-buffers':
        specifier: ^3.0.4
        version: 3.0.4
      '@types/uuid':
        specifier: ^9.0.2
        version: 9.0.2
@@ -2947,6 +2962,12 @@ packages:
    resolution: {integrity: sha512-+Wt0NFAeflVSNiUnHIDNN3C8jP7XIRmYrcgJ6IsAnm0lK4p/FkpCpeu1aig5qxrgZx30PHNDLZ/3FttVSEW2aQ==}
    dev: false

  /@types/archiver@5.3.2:
    resolution: {integrity: sha512-IctHreBuWE5dvBDz/0WeKtyVKVRs4h75IblxOACL92wU66v+HGAfEYAOyXkOFphvRJMhuXdI9huDXpX0FC6lCw==}
    dependencies:
      '@types/readdir-glob': 1.1.1
    dev: true

  /@types/babel__core@7.20.1:
    resolution: {integrity: sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==}
    dependencies:
@@ -3262,6 +3283,12 @@ packages:
      '@types/scheduler': 0.16.3
      csstype: 3.1.2

  /@types/readdir-glob@1.1.1:
    resolution: {integrity: sha512-ImM6TmoF8bgOwvehGviEj3tRdRBbQujr1N+0ypaln/GWjaerOB26jb93vsRHmdMtvVQZQebOlqt2HROark87mQ==}
    dependencies:
      '@types/node': 20.4.10
    dev: true

  /@types/request@2.48.8:
    resolution: {integrity: sha512-whjk1EDJPcAR2kYHRbFl/lKeeKYTi05A15K9bnLInCVroNDCtXce57xKdI0/rQaA3K+6q0eFyUBPmqfSndUZdQ==}
    dependencies:
@@ -3293,6 +3320,12 @@ packages:
      '@types/node': 20.4.10
    dev: true

  /@types/stream-buffers@3.0.4:
    resolution: {integrity: sha512-qU/K1tb2yUdhXkLIATzsIPwbtX6BpZk0l3dPW6xqWyhfzzM1ECaQ/8faEnu3CNraLiQ9LHyQQPBGp7N9Fbs25w==}
    dependencies:
      '@types/node': 20.4.10
    dev: true

  /@types/tough-cookie@4.0.2:
    resolution: {integrity: sha512-Q5vtl1W5ue16D+nIaW8JWebSSraJVlK+EthKn7e7UcD4KWsaSJ8BqGPXNaPghgtcn/fhvrN17Tv8ksUsQpiplw==}
    dev: false
@@ -3696,6 +3729,51 @@ packages:
      picomatch: 2.3.1
    dev: false

  /archiver-utils@2.1.0:
    resolution: {integrity: sha512-bEL/yUb/fNNiNTuUz979Z0Yg5L+LzLxGJz8x79lYmR54fmTIb6ob/hNQgkQnIUDWIFjZVQwl9Xs356I6BAMHfw==}
    engines: {node: '>= 6'}
    dependencies:
      glob: 7.2.3
      graceful-fs: 4.2.11
      lazystream: 1.0.1
      lodash.defaults: 4.2.0
      lodash.difference: 4.5.0
      lodash.flatten: 4.4.0
      lodash.isplainobject: 4.0.6
      lodash.union: 4.6.0
      normalize-path: 3.0.0
      readable-stream: 2.3.8
    dev: false

  /archiver-utils@3.0.3:
    resolution: {integrity: sha512-fXzpEZTKgBJMWy0eUT0/332CAQnJ27OJd7sGcvNZzxS2Yzg7iITivMhXOm+zUTO4vT8ZqlPCqiaLPmB8qWhWRA==}
    engines: {node: '>= 10'}
    dependencies:
      glob: 7.2.3
      graceful-fs: 4.2.11
      lazystream: 1.0.1
      lodash.defaults: 4.2.0
      lodash.difference: 4.5.0
      lodash.flatten: 4.4.0
      lodash.isplainobject: 4.0.6
      lodash.union: 4.6.0
      normalize-path: 3.0.0
      readable-stream: 3.6.2
    dev: false

  /archiver@6.0.0:
    resolution: {integrity: sha512-EPGa+bYaxaMiCT8DCbEDqFz8IjeBSExrJzyUOJx2FBkFJ/OZzJuso3lMSk901M50gMqXxTQcumlGajOFlXhVhw==}
    engines: {node: '>= 12.0.0'}
    dependencies:
      archiver-utils: 3.0.3
      async: 3.2.4
      buffer-crc32: 0.2.13
      readable-stream: 3.6.2
      readdir-glob: 1.1.3
      tar-stream: 2.2.0
      zip-stream: 4.1.0
    dev: false

  /argparse@2.0.1:
    resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}

@@ -3834,6 +3912,10 @@ packages:
      tslib: 2.6.1
    dev: false

  /async@3.2.4:
    resolution: {integrity: sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==}
    dev: false

  /asynckit@0.4.0:
    resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}

@@ -3953,6 +4035,14 @@ packages:
    engines: {node: '>=8'}
    dev: false

  /bl@4.1.0:
    resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==}
    dependencies:
      buffer: 5.7.1
      inherits: 2.0.4
      readable-stream: 3.6.2
    dev: false

  /bluebird@3.7.2:
    resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==}
    dev: false
@@ -4005,6 +4095,10 @@ packages:
      node-releases: 2.0.13
      update-browserslist-db: 1.0.11(browserslist@4.21.10)

  /buffer-crc32@0.2.13:
    resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==}
    dev: false

  /buffer-from@0.1.2:
    resolution: {integrity: sha512-RiWIenusJsmI2KcvqQABB83tLxCByE3upSP8QU3rJDMVFGPWLvPQJt/O1Su9moRWeH7d+Q2HYb68f6+v+tw2vg==}
    dev: false
@@ -4017,6 +4111,13 @@ packages:
    engines: {node: '>=4'}
    dev: false

  /buffer@5.7.1:
    resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
    dependencies:
      base64-js: 1.5.1
      ieee754: 1.2.1
    dev: false

  /busboy@1.6.0:
    resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==}
    engines: {node: '>=10.16.0'}
@@ -4243,6 +4344,16 @@ packages:
    resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==}
    dev: false

  /compress-commons@4.1.1:
    resolution: {integrity: sha512-QLdDLCKNV2dtoTorqgxngQCMA+gWXkM/Nwu7FpeBhk/RdkzimqC3jueb/FDmaZeXh+uby1jkBqE3xArsLBE5wQ==}
    engines: {node: '>= 10'}
    dependencies:
      buffer-crc32: 0.2.13
      crc32-stream: 4.0.2
      normalize-path: 3.0.0
      readable-stream: 3.6.2
    dev: false

  /compute-scroll-into-view@1.0.20:
    resolution: {integrity: sha512-UCB0ioiyj8CRjtrvaceBLqqhZCVP+1B8+NWQhmdsm0VXOJtobBCf1dBQmebCCo34qZmUwZfIH2MZLqNHazrfjg==}
    dev: false
@@ -4350,6 +4461,20 @@ packages:
      yaml: 1.10.2
    dev: false

  /crc-32@1.2.2:
    resolution: {integrity: sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==}
    engines: {node: '>=0.8'}
    hasBin: true
    dev: false

  /crc32-stream@4.0.2:
    resolution: {integrity: sha512-DxFZ/Hk473b/muq1VJ///PMNLj0ZMnzye9thBpmjpJKCc5eMgB95aK8zCGrGfQ90cWo561Te6HK9D+j4KPdM6w==}
    engines: {node: '>= 10'}
    dependencies:
      crc-32: 1.2.2
      readable-stream: 3.6.2
    dev: false

  /create-emotion@10.0.27:
    resolution: {integrity: sha512-fIK73w82HPPn/RsAij7+Zt8eCE8SptcJ3WoRMfxMtjteYxud8GDTKKld7MYwAX2TVhrw29uR1N/bVGxeStHILg==}
    dependencies:
@@ -4732,6 +4857,12 @@ packages:
      iconv-lite: 0.6.3
    dev: false

  /end-of-stream@1.4.4:
    resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==}
    dependencies:
      once: 1.4.0
    dev: false

  /engine.io-client@6.5.2:
    resolution: {integrity: sha512-CQZqbrpEYnrpGqC07a9dJDz4gePZUgTPMU3NKJPSeQOyw27Tst4Pl3FemKoFGAlHzgZmKjoRmiJvbWfhCXUlIg==}
    dependencies:
@@ -5574,6 +5705,10 @@ packages:
    engines: {node: '>= 0.6'}
    dev: false

  /fs-constants@1.0.0:
    resolution: {integrity: sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==}
    dev: false

  /fs-extra@11.1.1:
    resolution: {integrity: sha512-MGIE4HOvQCeUCzmlHs0vXpih4ysz4wg9qiSAu6cd42lVwPbTM1TjV7RusoyQqMmk/95gdQZX72u+YW+c3eEpFQ==}
    engines: {node: '>=14.14'}
@@ -5942,6 +6077,10 @@ packages:
      - supports-color
    dev: false

  /human-id@4.0.0:
    resolution: {integrity: sha512-pui0xZRgeAlaRt0I9r8N2pNlbNmluvn71EfjKRpM7jOpZbuHe5mm76r67gcprjw/Nd+GpvB9C3OlTbh7ZKLg7A==}
    dev: false

  /humanize-ms@1.2.1:
    resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==}
    dependencies:
@@ -5962,6 +6101,10 @@ packages:
      safer-buffer: 2.1.2
    dev: false

  /ieee754@1.2.1:
    resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==}
    dev: false

  /ignore@5.2.4:
    resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==}
    engines: {node: '>= 4'}
@@ -6427,6 +6570,13 @@ packages:
      language-subtag-registry: 0.3.22
    dev: true

  /lazystream@1.0.1:
    resolution: {integrity: sha512-b94GiNHQNy6JNTrt5w6zNyffMrNkXZb3KTkCZJb2V1xaEGCk093vkZ2jk3tpaeP33/OiXC+WvK9AxUebnf5nbw==}
    engines: {node: '>= 0.6.3'}
    dependencies:
      readable-stream: 2.3.8
    dev: false

  /levn@0.4.1:
    resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
    engines: {node: '>= 0.8.0'}
@@ -6495,6 +6645,22 @@ packages:
    resolution: {integrity: sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==}
    dev: false

  /lodash.defaults@4.2.0:
    resolution: {integrity: sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==}
    dev: false

  /lodash.difference@4.5.0:
    resolution: {integrity: sha512-dS2j+W26TQ7taQBGN8Lbbq04ssV3emRw4NY58WErlTO29pIqS0HmoT5aJ9+TUQ1N3G+JOZSji4eugsWwGp9yPA==}
    dev: false

  /lodash.flatten@4.4.0:
    resolution: {integrity: sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==}
    dev: false

  /lodash.isplainobject@4.0.6:
    resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==}
    dev: false

  /lodash.merge@4.6.2:
    resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
    dev: true
@@ -6503,6 +6669,10 @@ packages:
    resolution: {integrity: sha512-GK3g5RPZWTRSeLSpgP8Xhra+pnjBC56q9FZYe1d5RN3TJ35dbkGy3YqBSMbyCrlbi+CM9Z3Jk5yTL7RCsqboyQ==}
    dev: false

  /lodash.union@4.6.0:
    resolution: {integrity: sha512-c4pB2CdGrGdjMKYLA+XiRDO7Y0PRQbm/Gzg8qMj+QH+pFVAoTp5sBpO0odL3FjoPCGjK96p6qsP+yQoiLoOBcw==}
    dev: false

  /lodash@4.17.21:
    resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
    dev: false
@@ -7848,6 +8018,21 @@ packages:
      util-deprecate: 1.0.2
    dev: false

  /readable-stream@3.6.2:
    resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==}
    engines: {node: '>= 6'}
    dependencies:
      inherits: 2.0.4
      string_decoder: 1.1.1
      util-deprecate: 1.0.2
    dev: false

  /readdir-glob@1.1.3:
    resolution: {integrity: sha512-v05I2k7xN8zXvPD9N+z/uhXPaj0sUFCe2rcWZIpBsqxfP7xXFQ0tipAd/wjj1YxWyWtUS5IDJpOG82JKt2EAVA==}
    dependencies:
      minimatch: 5.1.6
    dev: false

  /readdirp@3.6.0:
    resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
    engines: {node: '>=8.10.0'}
@@ -8303,6 +8488,11 @@ packages:
    resolution: {integrity: sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==}
    dev: true

  /stream-buffers@3.0.2:
    resolution: {integrity: sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==}
    engines: {node: '>= 0.10.0'}
    dev: false

  /streamsearch@1.1.0:
    resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==}
    engines: {node: '>=10.0.0'}
@@ -8450,6 +8640,17 @@ packages:
    resolution: {integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==}
    engines: {node: '>=6'}

  /tar-stream@2.2.0:
    resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==}
    engines: {node: '>=6'}
    dependencies:
      bl: 4.1.0
      end-of-stream: 1.4.4
      fs-constants: 1.0.0
      inherits: 2.0.4
      readable-stream: 3.6.2
    dev: false

  /terser-webpack-plugin@5.3.9(webpack@5.88.2):
    resolution: {integrity: sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==}
    engines: {node: '>= 10.13.0'}
@@ -9334,6 +9535,15 @@ packages:
    resolution: {integrity: sha512-N+d4UJSJbt/R3wqY7Coqs5pcV0aUj2j9IaQ3rNj9bVCLld8tTGKRa2USARjnvZJWVx1NDmQev8EknoczaOQDOA==}
    dev: false

  /zip-stream@4.1.0:
    resolution: {integrity: sha512-zshzwQW7gG7hjpBlgeQP9RuyPGNxvJdzR8SUM3QhxCnLjWN2E7j3dOvpeDcQoETfHx0urRS7EtmVToql7YpU4A==}
    engines: {node: '>= 10'}
    dependencies:
      archiver-utils: 2.1.0
      compress-commons: 4.1.1
      readable-stream: 3.6.2
    dev: false

  /zod-to-json-schema@3.21.4(zod@3.21.4):
    resolution: {integrity: sha512-fjUZh4nQ1s6HMccgIeE0VP4QG/YRGPmyjO9sAh890aQKPEk3nqbfUXhMFaC+Dr5KvYBm8BCyvfpZf2jY9aGSsw==}
    peerDependencies: