mirror of
https://github.com/abetlen/llama-cpp-python.git
synced 2023-09-07 17:34:22 +03:00
Add sampling defaults for generate
This commit is contained in:
@@ -446,10 +446,10 @@ class Llama:
     def generate(
         self,
         tokens: Sequence[llama_cpp.llama_token],
-        top_k: int,
-        top_p: float,
-        temp: float,
-        repeat_penalty: float,
+        top_k: int = 40,
+        top_p: float = 0.95,
+        temp: float = 0.80,
+        repeat_penalty: float = 1.1,
         reset: bool = True,
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
|
|||||||
Reference in New Issue
Block a user