mirror of
https://github.com/abetlen/llama-cpp-python.git
synced 2023-09-07 17:34:22 +03:00
Add return type annotations for embeddings and logits
This commit is contained in:
@@ -387,7 +387,7 @@ _lib.llama_n_embd.restype = c_int
 # Can be mutated in order to change the probabilities of the next token
 # Rows: n_tokens
 # Cols: n_vocab
-def llama_get_logits(ctx: llama_context_p):
+def llama_get_logits(ctx: llama_context_p):  # type: (...) -> Array[float] # type: ignore
     return _lib.llama_get_logits(ctx)
||||
@@ -397,7 +397,7 @@ _lib.llama_get_logits.restype = c_float_p
 # Get the embeddings for the input
 # shape: [n_embd] (1-dimensional)
-def llama_get_embeddings(ctx: llama_context_p):
+def llama_get_embeddings(ctx: llama_context_p):  # type: (...) -> Array[float] # type: ignore
     return _lib.llama_get_embeddings(ctx)
Reference in New Issue
Block a user