formatting
@@ -1,8 +1,9 @@
This example shows how to run a Python (LangChain) chain implementation with Promptfoo. It compares GPT-4 versus LangChain's LLM Math module.

To run it, first create a virtual env and install the requirements:

Then activate the virtual env.

```
python3 -m venv venv
source venv/bin/activate
@@ -10,7 +11,7 @@ pip install -r requirements.txt
```

Then run the eval:

```
npx promptfoo eval
```
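For context, here is a minimal sketch of the kind of LangChain script this example's eval could be driving. The file name (`chain.py`), the use of `LLMMathChain`, and the prompt-passed-as-argv convention are illustrative assumptions, not the exact code in the repository:

```python
# chain.py -- hypothetical sketch, not the repo's actual script.
# Assumes promptfoo invokes it with the prompt as the first CLI argument
# and captures the chain's answer from stdout.
import sys

from langchain.chains import LLMMathChain
from langchain.llms import OpenAI


def main() -> None:
    prompt = sys.argv[1]
    llm = OpenAI(temperature=0)  # relies on OPENAI_API_KEY in the environment
    chain = LLMMathChain.from_llm(llm)  # wraps the LLM in a math-solving chain
    print(chain.run(prompt))


if __name__ == "__main__":
    main()
```

Promptfoo would then be pointed at such a script from its config (e.g., via a script/exec-style provider), though the exact provider wiring for this example is not shown in the diff.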
@@ -25,7 +25,7 @@ export class AnthropicCompletionProvider implements ApiProvider {
constructor(modelName: string, apiKey?: string, context?: AnthropicCompletionOptions) {
  this.modelName = modelName;
  this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
-  this.anthropic = new Anthropic({apiKey: this.apiKey});
+  this.anthropic = new Anthropic({ apiKey: this.apiKey });
  this.options = context || {};
}
@@ -79,7 +79,7 @@ describe('providers', () => {
const provider = new AnthropicCompletionProvider('claude-1', 'test-api-key');
provider.anthropic.completions.create = jest.fn().mockResolvedValue({
  completion: 'Test output',
-})
+});
const result = await provider.callApi('Test prompt');

expect(provider.anthropic.completions.create).toHaveBeenCalledTimes(1);