Mirror of https://github.com/abetlen/llama-cpp-python.git (synced 2023-09-07 17:34:22 +03:00)
Add support to get embeddings from high-level api. Closes #4
examples/high_level_api_embedding.py (new file, 12 lines added)
@@ -0,0 +1,12 @@
+import json
+import argparse
+
+from llama_cpp import Llama
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-m", "--model", type=str, default=".//models/...")
+args = parser.parse_args()
+
+llm = Llama(model_path=args.model, embedding=True)
+
+print(llm.embed("Hello world!"))
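Beyond printing the raw vector, the value returned by the new embed() method can be compared against another embedding. The sketch below is not part of the commit: the model path is a placeholder, and it assumes (as the committed example suggests) that llm.embed() returns a flat list of floats.

import math

from llama_cpp import Llama

# Placeholder path; point this at a local model file usable by llama.cpp.
llm = Llama(model_path="./models/your-model.bin", embedding=True)

# Each call returns one embedding vector as a flat list of floats.
a = llm.embed("Hello world!")
b = llm.embed("Hello there!")

# Cosine similarity between the two embedding vectors.
dot = sum(x * y for x, y in zip(a, b))
norm = math.sqrt(sum(x * x for x in a) * sum(y * y for y in b))
print("cosine similarity:", dot / norm)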