Mirror of https://github.com/abetlen/llama-cpp-python.git (synced 2023-09-07 17:34:22 +03:00)
Add type annotations
@@ -332,13 +332,15 @@ class Llama:
         stream: bool = False,
     ) -> Union[Iterator[Completion], Iterator[CompletionChunk]]:
         assert self.ctx is not None
-        completion_id = f"cmpl-{str(uuid.uuid4())}"
-        created = int(time.time())
+        completion_id: str = f"cmpl-{str(uuid.uuid4())}"
+        created: int = int(time.time())
         completion_tokens: List[llama_cpp.llama_token] = []
         # Add blank space to start of prompt to match OG llama tokenizer
-        prompt_tokens = self.tokenize(b" " + prompt.encode("utf-8"))
-        text = b""
-        returned_characters = 0
+        prompt_tokens: List[llama_cpp.llama_token] = self.tokenize(
+            b" " + prompt.encode("utf-8")
+        )
+        text: bytes = b""
+        returned_characters: int = 0
         stop = stop if stop is not None else []

         if self.verbose:
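For context, the change adds PEP 526 variable annotations to the locals set up at the start of the completion method. Below is a minimal standalone sketch of the same pattern; the `tokenize` stub and the `llama_token = int` alias are illustrative stand-ins, not the library's actual definitions (in the real bindings `llama_cpp.llama_token` is presumably a ctypes integer type, and `tokenize` is a method on `Llama`):

import time
import uuid
from typing import List

# Stand-in for llama_cpp.llama_token (hypothetical; the real one lives in the bindings).
llama_token = int

def tokenize(data: bytes) -> List[llama_token]:
    # Hypothetical stub standing in for Llama.tokenize().
    return [llama_token(b) for b in data]

prompt = "Hello"

# The same annotation style the commit introduces: each local gets an explicit type.
completion_id: str = f"cmpl-{str(uuid.uuid4())}"
created: int = int(time.time())
completion_tokens: List[llama_token] = []
prompt_tokens: List[llama_token] = tokenize(b" " + prompt.encode("utf-8"))
text: bytes = b""
returned_characters: int = 0

With the annotations in place, a static checker such as mypy can flag misuse of these locals later in the method, for example assigning a `str` to `text`, which is declared as `bytes`.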