From 1e429135993f4e1298d8c801f2628bae3d8f18a9 Mon Sep 17 00:00:00 2001
From: Lucas Doyle
Date: Sat, 29 Apr 2023 11:48:49 -0700
Subject: [PATCH] llama_cpp server: move logprobs to supported

I think this is actually supported (it's in the arguments of
`Llama.__call__`, which is how the completion is invoked).

decision: mark as supported
---
 llama_cpp/server/app.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py
index cc467db..2d20f37 100644
--- a/llama_cpp/server/app.py
+++ b/llama_cpp/server/app.py
@@ -79,12 +79,11 @@ class CreateCompletionRequest(BaseModel):
     echo: bool = False
     stop: Optional[List[str]] = []
     stream: bool = False
+    logprobs: Optional[int] = Field(None)
 
     # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field
 
-    logprobs: Optional[int] = Field(None)
-
     # llama.cpp specific parameters
     top_k: int = 40
     repeat_penalty: float = 1.1
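
A minimal sketch of how a client could exercise the field once this patch
is applied. It assumes a server started locally with
`python3 -m llama_cpp.server` on its default port 8000; the prompt and
parameter values are illustrative only.

    import requests

    # POST to the server's OpenAI-compatible completions endpoint. After
    # this patch, "logprobs" is a recognized field on CreateCompletionRequest
    # rather than sitting among the ignored parameters, so it travels with
    # the other sampling settings into Llama.__call__.
    resp = requests.post(
        "http://localhost:8000/v1/completions",
        json={
            "prompt": "Q: What is the capital of France? A:",
            "max_tokens": 8,
            "logprobs": 2,  # ask for per-token log probabilities
        },
    )
    print(resp.json()["choices"][0].get("logprobs"))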