From eef627c09c16cde2ccce216c4780f8967877005e Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Sat, 1 Apr 2023 17:39:35 -0400
Subject: [PATCH] Fix example documentation

---
 llama_cpp/llama.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index e0becc4..e151c95 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -127,10 +127,11 @@ class Llama:
     ]:
         """Generate tokens.
 
-        >>> llama = Llama("models/117M")
-        >>> tokens = llama.tokenize(b"Hello, world!")
-        >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1):
-        ...     print(llama.detokenize([token]))
+        Examples:
+            >>> llama = Llama("models/ggml-7b.bin")
+            >>> tokens = llama.tokenize(b"Hello, world!")
+            >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1):
+            ...     print(llama.detokenize([token]))
 
         Args:
             tokens: The prompt tokens.
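
For reference, a minimal runnable sketch of the usage the updated docstring
demonstrates. This is an assumption-laden illustration, not part of the patch:
it assumes the package is importable as `llama_cpp`, and the model path is the
placeholder from the docstring, not a file shipped with the package.

    # Hedged usage sketch based on the updated docstring example.
    # Assumptions: llama-cpp-python is installed, and a GGML model file
    # exists at the placeholder path below.
    import itertools

    from llama_cpp import Llama

    llama = Llama("models/ggml-7b.bin")  # placeholder path from the docstring
    tokens = llama.tokenize(b"Hello, world!")

    # generate() yields tokens one at a time until the caller stops, so the
    # caller controls the length; islice here is our addition (not in the
    # docstring) to keep this sketch terminating.
    for token in itertools.islice(
        llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1),
        16,
    ):
        print(llama.detokenize([token]))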