From 8895b9002acefbccfee0cfc36f22ede7410b64e2 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Fri, 12 May 2023 07:16:57 -0400
Subject: [PATCH] Revert "llama_cpp server: prompt is a string". Closes #187

This reverts commit b9098b0ef7309b63ebff99cdfadf641223c15025.
---
 llama_cpp/server/app.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py
index be6bac8..b1237e5 100644
--- a/llama_cpp/server/app.py
+++ b/llama_cpp/server/app.py
@@ -167,8 +167,9 @@ frequency_penalty_field = Field(
 )
 
 class CreateCompletionRequest(BaseModel):
-    prompt: Optional[str] = Field(
-        default="", description="The prompt to generate completions for."
+    prompt: Union[str, List[str]] = Field(
+        default="",
+        description="The prompt to generate completions for."
     )
     suffix: Optional[str] = Field(
         default=None,
@@ -222,6 +223,9 @@ CreateCompletionResponse = create_model_from_typeddict(llama_cpp.Completion)
 def create_completion(
     request: CreateCompletionRequest, llama: llama_cpp.Llama = Depends(get_llama)
 ):
+    if isinstance(request.prompt, list):
+        request.prompt = "".join(request.prompt)
+
     completion_or_chunks = llama(
         **request.dict(
             exclude={
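
Note (not part of the patch): a minimal client-side sketch of the behavior this revert restores.
With this change the completion request accepts prompt either as a single string or as a list of
strings, which the handler joins into one string before generation. The URL below assumes the
default llama_cpp.server host, port, and OpenAI-compatible /v1/completions route; adjust as needed.

    import requests

    URL = "http://localhost:8000/v1/completions"  # assumed default server address

    # A plain string prompt works as before.
    single = requests.post(
        URL,
        json={"prompt": "Q: Name the planets in the solar system. A:", "max_tokens": 32},
    )

    # A list of strings is also accepted again; the server concatenates
    # the parts into one prompt before generating.
    parts = requests.post(
        URL,
        json={"prompt": ["Q: Name the planets in the solar system.", " A:"], "max_tokens": 32},
    )

    print(single.json()["choices"][0]["text"])
    print(parts.json()["choices"][0]["text"])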