This switches all OpenAI calls in the OpenPipe app over to our own openpipe client. For now the client does nothing beyond proxying to the OpenAI lib, but having this infra in place should make it easier to iterate on the client quickly and test the changes in our own app.
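As a rough sketch of what "just proxying" could look like (the `OpenPipeClient` name and method here are illustrative, not the package's actual exports; the import assumes the v4 beta SDK aliased as `openai-beta` in the package.json below):

```ts
import OpenAI from "openai-beta";

// Parameter type is derived from whatever the SDK exposes, so this stays in
// sync with the aliased openai version without hard-coding its type names.
type ChatCreateParams = Parameters<OpenAI["chat"]["completions"]["create"]>[0];

export class OpenPipeClient {
  private readonly openai: OpenAI;

  constructor(options?: ConstructorParameters<typeof OpenAI>[0]) {
    this.openai = new OpenAI(options);
  }

  // Pure pass-through for now; metrics/evals can be layered in here later
  // without touching call sites in the app.
  createChatCompletion(params: ChatCreateParams) {
    return this.openai.chat.completions.create(params);
  }
}
```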
26 lines · 553 B · JSON
{
  "name": "openpipe",
  "version": "0.1.0",
  "type": "module",
  "description": "Metrics and auto-evaluation for LLM calls",
  "scripts": {
    "build": "tsc"
  },
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "keywords": [],
  "author": "",
  "license": "Apache-2.0",
  "dependencies": {
    "axios": "^0.26.0",
    "openai-beta": "npm:openai@4.0.0-beta.7",
    "openai-legacy": "npm:openai@3.3.0"
  },
  "devDependencies": {
    "@types/node": "^20.4.8",
    "dotenv": "^16.3.1",
    "tsx": "^3.12.7",
    "typescript": "^5.0.4"
  }
}
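The two `openai-*` entries use npm package aliases so the legacy v3 SDK and the v4 beta can be installed side by side. A hedged sketch of how that plays out at the import site (module names come straight from the aliases above; the surrounding code is illustrative):

```ts
// Both SDK majors resolve from the aliases defined in package.json.
import OpenAI from "openai-beta";                          // openai@4.0.0-beta.7
import { Configuration, OpenAIApi } from "openai-legacy";  // openai@3.3.0

// v4 beta client
const v4 = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// v3 legacy client
const v3 = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
);
```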