move app to app/ subdir
This commit is contained in:
@@ -0,0 +1,69 @@
|
||||
/* eslint-disable @typescript-eslint/no-var-requires */
|
||||
|
||||
import YAML from "yaml";
|
||||
import fs from "fs";
|
||||
import path from "path";
|
||||
import { openapiSchemaToJsonSchema } from "@openapi-contrib/openapi-schema-to-json-schema";
|
||||
import $RefParser from "@apidevtools/json-schema-ref-parser";
|
||||
import { type JSONObject } from "superjson/dist/types";
|
||||
import assert from "assert";
|
||||
import { type JSONSchema4Object } from "json-schema";
|
||||
import { isObject } from "lodash-es";
|
||||
|
||||
// @ts-expect-error for some reason missing from types
|
||||
import parserEstree from "prettier/plugins/estree";
|
||||
import parserBabel from "prettier/plugins/babel";
|
||||
import prettier from "prettier/standalone";
|
||||
|
||||
const OPENAPI_URL =
|
||||
"https://raw.githubusercontent.com/tryAGI/Anthropic/1c0871e861de60a4c3a843cb90e17d63e86c234a/docs/openapi.yaml";
|
||||
|
||||
// Fetch the openapi document
|
||||
const response = await fetch(OPENAPI_URL);
|
||||
const openApiYaml = await response.text();
|
||||
|
||||
// Parse the yaml document
|
||||
let schema = YAML.parse(openApiYaml) as JSONObject;
|
||||
schema = openapiSchemaToJsonSchema(schema);
|
||||
|
||||
const jsonSchema = await $RefParser.dereference(schema);
|
||||
|
||||
assert("components" in jsonSchema);
|
||||
const completionRequestSchema = jsonSchema.components.schemas
|
||||
.CreateCompletionRequest as JSONSchema4Object;
|
||||
|
||||
// We need to do a bit of surgery here since the Monaco editor doesn't like
|
||||
// the fact that the schema says `model` can be either a string or an enum,
|
||||
// and displays a warning in the editor. Let's stick with just an enum for
|
||||
// now and drop the string option.
|
||||
assert(
|
||||
"properties" in completionRequestSchema &&
|
||||
isObject(completionRequestSchema.properties) &&
|
||||
"model" in completionRequestSchema.properties &&
|
||||
isObject(completionRequestSchema.properties.model),
|
||||
);
|
||||
|
||||
const modelProperty = completionRequestSchema.properties.model;
|
||||
assert(
|
||||
"oneOf" in modelProperty &&
|
||||
Array.isArray(modelProperty.oneOf) &&
|
||||
modelProperty.oneOf.length === 2 &&
|
||||
isObject(modelProperty.oneOf[1]) &&
|
||||
"enum" in modelProperty.oneOf[1],
|
||||
"Expected model to have oneOf length of 2",
|
||||
);
|
||||
modelProperty.type = "string";
|
||||
modelProperty.enum = modelProperty.oneOf[1].enum;
|
||||
delete modelProperty["oneOf"];
|
||||
|
||||
// Get the directory of the current script
|
||||
const currentDirectory = path.dirname(import.meta.url).replace("file://", "");
|
||||
|
||||
// Write the JSON schema to a file in the current directory
|
||||
fs.writeFileSync(
|
||||
path.join(currentDirectory, "input.schema.json"),
|
||||
await prettier.format(JSON.stringify(completionRequestSchema, null, 2), {
|
||||
parser: "json",
|
||||
plugins: [parserBabel, parserEstree],
|
||||
}),
|
||||
);
|
||||
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"model": {
|
||||
"description": "The model that will complete your prompt.",
|
||||
"x-oaiTypeLabel": "string",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"claude-2",
|
||||
"claude-2.0",
|
||||
"claude-instant-1",
|
||||
"claude-instant-1.1"
|
||||
]
|
||||
},
|
||||
"prompt": {
|
||||
"description": "The prompt that you want Claude to complete.\n\nFor proper response generation you will need to format your prompt as follows:\n\"\\n\\nHuman: all instructions for the assistant\\n\\nAssistant:\". The prompt string should begin with the characters \"Human:\" and end with \"Assistant:\".",
|
||||
"default": "<|endoftext|>",
|
||||
"example": "\\n\\nHuman: What is the correct translation of ${scenario.input}? I would like a long analysis followed by a short answer.\\n\\nAssistant:",
|
||||
"type": "string"
|
||||
},
|
||||
"max_tokens_to_sample": {
|
||||
"type": "integer",
|
||||
"minimum": 1,
|
||||
"default": 256,
|
||||
"nullable": true,
|
||||
"description": "The maximum number of tokens to generate before stopping."
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"maximum": 1,
|
||||
"nullable": true,
|
||||
"description": "Amount of randomness injected into the response.\n\nDefaults to 1."
|
||||
},
|
||||
"top_p": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"maximum": 1,
|
||||
"nullable": true,
|
||||
"description": "Use nucleus sampling.\n\nYou should either alter temperature or top_p, but not both.\n"
|
||||
},
|
||||
"top_k": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"default": 5,
|
||||
"nullable": true,
|
||||
"description": "Only sample from the top K options for each subsequent token."
|
||||
},
|
||||
"stream": {
|
||||
"description": "Whether to incrementally stream the response using server-sent events.",
|
||||
"type": "boolean",
|
||||
"nullable": true,
|
||||
"default": false
|
||||
},
|
||||
"stop_sequences": {
|
||||
"description": "Sequences that will cause the model to stop generating completion text.\nBy default, our models stop on \"\\n\\nHuman:\".",
|
||||
"default": null,
|
||||
"nullable": true,
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": ["model", "prompt", "max_tokens_to_sample"]
|
||||
}
|
||||
42
app/src/modelProviders/anthropic-completion/frontend.ts
Normal file
42
app/src/modelProviders/anthropic-completion/frontend.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import { type Completion } from "@anthropic-ai/sdk/resources";
|
||||
import { type SupportedModel } from ".";
|
||||
import { type FrontendModelProvider } from "../types";
|
||||
import { refinementActions } from "./refinementActions";
|
||||
|
||||
const frontendModelProvider: FrontendModelProvider<SupportedModel, Completion> = {
|
||||
name: "Replicate Llama2",
|
||||
|
||||
models: {
|
||||
"claude-2.0": {
|
||||
name: "Claude 2.0",
|
||||
contextWindow: 100000,
|
||||
promptTokenPrice: 11.02 / 1000000,
|
||||
completionTokenPrice: 32.68 / 1000000,
|
||||
speed: "medium",
|
||||
provider: "anthropic/completion",
|
||||
learnMoreUrl: "https://www.anthropic.com/product",
|
||||
apiDocsUrl: "https://docs.anthropic.com/claude/reference/complete_post",
|
||||
},
|
||||
"claude-instant-1.1": {
|
||||
name: "Claude Instant 1.1",
|
||||
contextWindow: 100000,
|
||||
promptTokenPrice: 1.63 / 1000000,
|
||||
completionTokenPrice: 5.51 / 1000000,
|
||||
speed: "fast",
|
||||
provider: "anthropic/completion",
|
||||
learnMoreUrl: "https://www.anthropic.com/product",
|
||||
apiDocsUrl: "https://docs.anthropic.com/claude/reference/complete_post",
|
||||
},
|
||||
},
|
||||
|
||||
refinementActions,
|
||||
|
||||
normalizeOutput: (output) => {
|
||||
return {
|
||||
type: "text",
|
||||
value: output.completion,
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
export default frontendModelProvider;
|
||||
86
app/src/modelProviders/anthropic-completion/getCompletion.ts
Normal file
86
app/src/modelProviders/anthropic-completion/getCompletion.ts
Normal file
@@ -0,0 +1,86 @@
|
||||
import { env } from "~/env.mjs";
|
||||
import { type CompletionResponse } from "../types";
|
||||
|
||||
import Anthropic, { APIError } from "@anthropic-ai/sdk";
|
||||
import { type Completion, type CompletionCreateParams } from "@anthropic-ai/sdk/resources";
|
||||
import { isObject, isString } from "lodash-es";
|
||||
|
||||
// Single shared Anthropic SDK client, authenticated with the server-side key
// from the validated environment config.
const anthropic = new Anthropic({
  apiKey: env.ANTHROPIC_API_KEY,
});
|
||||
|
||||
export async function getCompletion(
|
||||
input: CompletionCreateParams,
|
||||
onStream: ((partialOutput: Completion) => void) | null,
|
||||
): Promise<CompletionResponse<Completion>> {
|
||||
const start = Date.now();
|
||||
let finalCompletion: Completion | null = null;
|
||||
|
||||
try {
|
||||
if (onStream) {
|
||||
const resp = await anthropic.completions.create(
|
||||
{ ...input, stream: true },
|
||||
{
|
||||
maxRetries: 0,
|
||||
},
|
||||
);
|
||||
|
||||
for await (const part of resp) {
|
||||
if (finalCompletion === null) {
|
||||
finalCompletion = part;
|
||||
} else {
|
||||
finalCompletion = { ...part, completion: finalCompletion.completion + part.completion };
|
||||
}
|
||||
onStream(finalCompletion);
|
||||
}
|
||||
if (!finalCompletion) {
|
||||
return {
|
||||
type: "error",
|
||||
message: "Streaming failed to return a completion",
|
||||
autoRetry: false,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
const resp = await anthropic.completions.create(
|
||||
{ ...input, stream: false },
|
||||
{
|
||||
maxRetries: 0,
|
||||
},
|
||||
);
|
||||
finalCompletion = resp;
|
||||
}
|
||||
const timeToComplete = Date.now() - start;
|
||||
|
||||
return {
|
||||
type: "success",
|
||||
statusCode: 200,
|
||||
value: finalCompletion,
|
||||
timeToComplete,
|
||||
};
|
||||
} catch (error: unknown) {
|
||||
console.log("CAUGHT ERROR", error);
|
||||
if (error instanceof APIError) {
|
||||
const message =
|
||||
isObject(error.error) &&
|
||||
"error" in error.error &&
|
||||
isObject(error.error.error) &&
|
||||
"message" in error.error.error &&
|
||||
isString(error.error.error.message)
|
||||
? error.error.error.message
|
||||
: error.message;
|
||||
|
||||
return {
|
||||
type: "error",
|
||||
message,
|
||||
autoRetry: error.status === 429 || error.status === 503,
|
||||
statusCode: error.status,
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
type: "error",
|
||||
message: (error as Error).message,
|
||||
autoRetry: true,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
34
app/src/modelProviders/anthropic-completion/index.ts
Normal file
34
app/src/modelProviders/anthropic-completion/index.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
import { type JSONSchema4 } from "json-schema";
|
||||
import { type ModelProvider } from "../types";
|
||||
import inputSchema from "./codegen/input.schema.json";
|
||||
import { getCompletion } from "./getCompletion";
|
||||
import frontendModelProvider from "./frontend";
|
||||
import type { Completion, CompletionCreateParams } from "@anthropic-ai/sdk/resources";
|
||||
|
||||
const supportedModels = ["claude-2.0", "claude-instant-1.1"] as const;
|
||||
|
||||
export type SupportedModel = (typeof supportedModels)[number];
|
||||
|
||||
export type AnthropicProvider = ModelProvider<SupportedModel, CompletionCreateParams, Completion>;
|
||||
|
||||
const modelProvider: AnthropicProvider = {
|
||||
getModel: (input) => {
|
||||
if (supportedModels.includes(input.model as SupportedModel))
|
||||
return input.model as SupportedModel;
|
||||
|
||||
const modelMaps: Record<string, SupportedModel> = {
|
||||
"claude-2": "claude-2.0",
|
||||
"claude-instant-1": "claude-instant-1.1",
|
||||
};
|
||||
|
||||
if (input.model in modelMaps) return modelMaps[input.model] as SupportedModel;
|
||||
|
||||
return null;
|
||||
},
|
||||
inputSchema: inputSchema as JSONSchema4,
|
||||
canStream: true,
|
||||
getCompletion,
|
||||
...frontendModelProvider,
|
||||
};
|
||||
|
||||
export default modelProvider;
|
||||
@@ -0,0 +1,3 @@
|
||||
import { type RefinementAction } from "../types";
|
||||
|
||||
// No refinement actions are currently defined for this provider; the empty
// record keeps the provider's `refinementActions` contract satisfied.
export const refinementActions: Record<string, RefinementAction> = {};
|
||||
Reference in New Issue
Block a user