Mirror of https://github.com/abetlen/llama-cpp-python.git, last synced 2023-09-07 17:34:22 +03:00.
Add support to get embeddings from high-level api. Closes #4
This commit is contained in:
@@ -105,6 +105,20 @@ class Llama:
|
||||
output += llama_cpp.llama_token_to_str(self.ctx, token)
|
||||
return output
|
||||
|
||||
def embed(self, text: str):
    """Embed a string.

    Tokenizes *text*, runs a single evaluation pass over the model
    starting at position 0, then reads back the context's embedding
    vector, truncated to the model's embedding dimension.

    Args:
        text: The utf-8 encoded string to embed.

    Returns:
        A list of embeddings.
    """
    token_ids = self.tokenize(text.encode("utf-8"))
    self._eval(token_ids, 0)
    # llama_get_embeddings returns a raw buffer; slice to the model's
    # embedding width so callers only see valid values.
    n_embd = llama_cpp.llama_n_embd(self.ctx)
    return llama_cpp.llama_get_embeddings(self.ctx)[:n_embd]
|
||||
|
||||
def _eval(self, tokens: List[int], n_past):
|
||||
rc = llama_cpp.llama_eval(
|
||||
self.ctx,
|
||||
|
||||
Reference in New Issue
Block a user