mirror of https://github.com/abetlen/llama-cpp-python.git
Remove workaround
@@ -176,11 +176,6 @@ class Llama:
             The sampled token.
         """
         assert self.ctx is not None
-        # Temporary workaround for https://github.com/ggerganov/llama.cpp/issues/684
-        if temp == 0.0:
-            temp = 1.0
-            top_p = 0.0
-            top_k = 1
         return llama_cpp.llama_sample_top_p_top_k(
             ctx=self.ctx,
             last_n_tokens_data=(llama_cpp.llama_token * self.last_n_tokens_size)(
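For context, the deleted lines remapped a zero temperature to parameters that force effectively greedy decoding, because llama.cpp at the time mishandled temp == 0.0 (see ggerganov/llama.cpp#684). Below is a minimal standalone sketch of that remapping logic; the helper name and plain-Python form are illustrative only and are not part of the library.

def _remap_zero_temperature(temp: float, top_p: float, top_k: int) -> tuple[float, float, int]:
    """Illustrative re-creation of the removed workaround (hypothetical helper).

    With temp == 0.0 the old llama.cpp sampler misbehaved, so the wrapper
    substituted settings that amount to greedy decoding: top_k=1 keeps only
    the single most likely token, making temp and top_p irrelevant.
    """
    if temp == 0.0:
        return 1.0, 0.0, 1
    return temp, top_p, top_k


# Example: a caller requesting deterministic (greedy) sampling
temp, top_p, top_k = _remap_zero_temperature(0.0, 0.95, 40)
assert (temp, top_p, top_k) == (1.0, 0.0, 1)

After this commit, temp == 0.0 is passed through to llama_cpp.llama_sample_top_p_top_k unchanged, on the assumption that the upstream issue has been fixed.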