Mirror of https://github.com/abetlen/llama-cpp-python.git (synced 2023-09-07 17:34:22 +03:00)
llama_cpp server: move logprobs to supported
I think this is actually supported (it's in the arguments of `Llama.__call__`, which is how the completion is invoked). Decision: mark as supported.
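For context, a minimal sketch of the pattern the commit message alludes to: the server builds a Pydantic request model and forwards its fields as keyword arguments to `Llama.__call__`, excluding the fields kept only for OpenAI API compatibility. The class body mirrors the fields visible in the diff below; the `completion_kwargs` helper and the exact `exclude` set are assumptions for illustration, not the repository's verbatim code.

```python
from typing import List, Optional

from pydantic import BaseModel, Field


class CreateCompletionRequest(BaseModel):
    prompt: str
    echo: bool = False
    stop: Optional[List[str]] = []
    stream: bool = False
    logprobs: Optional[int] = Field(None)  # moved here: forwarded to Llama.__call__

    # ignored, but marked as required for the sake of compatibility with openai's api
    model: str = ""  # placeholder default; the repo uses a shared model_field

    # llama.cpp specific parameters
    top_k: int = 40
    repeat_penalty: float = 1.1


def completion_kwargs(request: CreateCompletionRequest) -> dict:
    # Hypothetical helper: drop the API-compatibility field, keep everything
    # Llama.__call__ accepts (including logprobs, now that it is supported).
    return request.dict(exclude={"model"})


req = CreateCompletionRequest(prompt="Hello", logprobs=2)
print(completion_kwargs(req))  # logprobs=2 is forwarded; model is not
```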
```diff
@@ -79,12 +79,11 @@ class CreateCompletionRequest(BaseModel):
     echo: bool = False
     stop: Optional[List[str]] = []
     stream: bool = False
+    logprobs: Optional[int] = Field(None)

     # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field

-    logprobs: Optional[int] = Field(None)
-
     # llama.cpp specific parameters
     top_k: int = 40
     repeat_penalty: float = 1.1
```
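With the field moved out of the ignored section, a client can pass `logprobs` through the OpenAI-compatible completions endpoint. A hedged usage sketch, assuming the server is running locally on its default port; the URL path and response shape follow the OpenAI completions API the server mimics:

```python
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={
        "prompt": "The quick brown fox",
        "max_tokens": 8,
        "logprobs": 3,  # request the top-3 token logprobs per position
    },
)
# In the OpenAI-style response, per-token logprobs live on each choice.
print(resp.json()["choices"][0].get("logprobs"))
```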