Mirror of https://github.com/abetlen/llama-cpp-python.git, synced 2023-09-07 17:34:22 +03:00
Add static methods for beginning and end of sequence tokens.
@@ -69,6 +69,8 @@ python3 setup.py develop
             - create_embedding
             - create_completion
             - __call__
+            - token_bos
+            - token_eos
         show_root_heading: true
 
 ::: llama_cpp.llama_cpp
@@ -446,3 +446,13 @@ class Llama:
         if self.ctx is not None:
             llama_cpp.llama_free(self.ctx)
             self.ctx = None
+
+    @staticmethod
+    def token_eos() -> llama_cpp.llama_token:
+        """Return the end-of-sequence token."""
+        return llama_cpp.llama_token_eos()
+
+    @staticmethod
+    def token_bos() -> llama_cpp.llama_token:
+        """Return the beginning-of-sequence token."""
+        return llama_cpp.llama_token_bos()
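For context, a minimal usage sketch (not part of the commit) of how the new static methods could be used to stop a generation loop once the model emits the end-of-sequence token. The model path, prompt, and sampling parameters are placeholders, assuming the high-level API as it existed around this commit:

# Hypothetical usage sketch; model path and prompt are placeholders.
from llama_cpp import Llama

llm = Llama(model_path="./models/7B/ggml-model.bin")
tokens = llm.tokenize(b"Q: Name the planets in the solar system. A:")

for token in llm.generate(tokens, top_k=40, top_p=0.95, temp=0.8, repeat_penalty=1.1):
    # Stop as soon as the model emits the end-of-sequence token.
    if token == Llama.token_eos():
        break
    print(llm.detokenize([token]).decode("utf-8", errors="ignore"), end="", flush=True)

Because both methods simply forward to the underlying llama_cpp bindings and take no instance state, they are exposed as static methods and can be called either on the class or on an instance.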