Mirror of https://github.com/abetlen/llama-cpp-python.git (synced 2023-09-07 17:34:22 +03:00).
Commit: "Add n_ctx, n_vocab, and n_embd properties."
This commit is contained in:
@@ -1291,6 +1291,24 @@ class Llama:
|
|||||||
if llama_cpp.llama_set_state_data(self.ctx, state.llama_state) != state_size:
|
if llama_cpp.llama_set_state_data(self.ctx, state.llama_state) != state_size:
|
||||||
raise RuntimeError("Failed to set llama state data")
|
raise RuntimeError("Failed to set llama state data")
|
||||||
|
|
||||||
|
@property
def n_ctx(self) -> int:
    """Return the context window size of the loaded model.

    Raises:
        AssertionError: If no llama context has been created yet.
    """
    ctx = self.ctx
    assert ctx is not None
    return llama_cpp.llama_n_ctx(ctx)
@property
def n_embd(self) -> int:
    """Return the embedding dimension of the loaded model.

    Raises:
        AssertionError: If no llama context has been created yet.
    """
    ctx = self.ctx
    assert ctx is not None
    return llama_cpp.llama_n_embd(ctx)
@property
def n_vocab(self) -> int:
    """Return the vocabulary size of the loaded model.

    Raises:
        AssertionError: If no llama context has been created yet.
    """
    ctx = self.ctx
    assert ctx is not None
    return llama_cpp.llama_n_vocab(ctx)
@staticmethod
|
@staticmethod
|
||||||
def token_eos() -> int:
|
def token_eos() -> int:
|
||||||
"""Return the end-of-sequence token."""
|
"""Return the end-of-sequence token."""
|
||||||
|
|||||||
Reference in New Issue
Block a user