diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py
index ba2ca2f..48dfc5e 100644
--- a/llama_cpp/server/app.py
+++ b/llama_cpp/server/app.py
@@ -13,18 +13,41 @@ from sse_starlette.sse import EventSourceResponse
 
 
 class Settings(BaseSettings):
-    model: str
-    n_ctx: int = 2048
-    n_batch: int = 512
-    n_threads: int = max((os.cpu_count() or 2) // 2, 1)
-    f16_kv: bool = True
-    use_mlock: bool = False  # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
-    use_mmap: bool = True
-    embedding: bool = True
-    last_n_tokens_size: int = 64
-    logits_all: bool = False
-    cache: bool = False  # WARNING: This is an experimental feature
-    vocab_only: bool = False
+    model: str = Field(
+        description="The path to the model to use for generating completions."
+    )
+    n_ctx: int = Field(default=2048, ge=1, description="The context size.")
+    n_batch: int = Field(
+        default=512, ge=1, description="The batch size to use per eval."
+    )
+    n_threads: int = Field(
+        default=max((os.cpu_count() or 2) // 2, 1),
+        ge=1,
+        description="The number of threads to use.",
+    )
+    f16_kv: bool = Field(default=True, description="Whether to use f16 key/value.")
+    use_mlock: bool = Field(
+        default=bool(llama_cpp.llama_mlock_supported().value),
+        description="Use mlock.",
+    )
+    use_mmap: bool = Field(
+        default=bool(llama_cpp.llama_mmap_supported().value),
+        description="Use mmap.",
+    )
+    embedding: bool = Field(default=True, description="Whether to use embeddings.")
+    last_n_tokens_size: int = Field(
+        default=64,
+        ge=0,
+        description="Last n tokens to keep for repeat penalty calculation.",
+    )
+    logits_all: bool = Field(default=True, description="Whether to return logits.")
+    cache: bool = Field(
+        default=False,
+        description="Use a cache to reduce processing times for evaluated prompts.",
+    )
+    vocab_only: bool = Field(
+        default=False, description="Whether to only load the vocabulary."
+    )
 
 
 router = APIRouter()
@@ -74,79 +97,75 @@ def get_llama():
     with llama_lock:
         yield llama
 
-model_field = Field(
-    description="The model to use for generating completions."
-)
+
+model_field = Field(description="The model to use for generating completions.")
 
 max_tokens_field = Field(
-    default=16,
-    ge=1,
-    le=2048,
-    description="The maximum number of tokens to generate."
+    default=16, ge=1, le=2048, description="The maximum number of tokens to generate."
 )
 
 temperature_field = Field(
     default=0.8,
     ge=0.0,
     le=2.0,
-    description="Adjust the randomness of the generated text.\n\n" +
-    "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run."
+    description="Adjust the randomness of the generated text.\n\n"
+    + "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.",
 )
 
 top_p_field = Field(
     default=0.95,
     ge=0.0,
     le=1.0,
-    description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" +
-    "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text."
+    description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n"
+    + "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.",
 )
 
 stop_field = Field(
     default=None,
-    description="A list of tokens at which to stop generation. If None, no stop tokens are used."
+    description="A list of strings at which to stop generation. If None, no stop strings are used.",
 )
 
 stream_field = Field(
     default=False,
-    description="Whether to stream the results as they are generated. Useful for chatbots."
+    description="Whether to stream the results as they are generated. Useful for chatbots.",
 )
 
 top_k_field = Field(
     default=40,
     ge=0,
-    description="Limit the next token selection to the K most probable tokens.\n\n" +
-    "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text."
+    description="Limit the next token selection to the K most probable tokens.\n\n"
+    + "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.",
 )
 
 repeat_penalty_field = Field(
     default=1.0,
     ge=0.0,
-    description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" +
-    "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
+    description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n"
+    + "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.",
 )
 
+
 class CreateCompletionRequest(BaseModel):
     prompt: Optional[str] = Field(
-        default="",
-        description="The prompt to generate completions for."
+        default="", description="The prompt to generate completions for."
     )
     suffix: Optional[str] = Field(
         default=None,
-        description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots."
+        description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots.",
     )
     max_tokens: int = max_tokens_field
     temperature: float = temperature_field
     top_p: float = top_p_field
     echo: bool = Field(
         default=False,
-        description="Whether to echo the prompt in the generated text. Useful for chatbots."
+        description="Whether to echo the prompt in the generated text. Useful for chatbots.",
     )
     stop: Optional[List[str]] = stop_field
     stream: bool = stream_field
     logprobs: Optional[int] = Field(
         default=None,
         ge=0,
-        description="The number of logprobs to generate. If None, no logprobs are generated."
+        description="The number of logprobs to generate. If None, no logprobs are generated.",
     )
 
     # ignored or currently unsupported
@@ -204,9 +223,7 @@ def create_completion(
 
 class CreateEmbeddingRequest(BaseModel):
     model: Optional[str] = model_field
-    input: str = Field(
-        description="The input to embed."
-    )
+    input: str = Field(description="The input to embed.")
     user: Optional[str]
 
     class Config:
@@ -239,8 +256,7 @@ class ChatCompletionRequestMessage(BaseModel):
 
 class CreateChatCompletionRequest(BaseModel):
     messages: List[ChatCompletionRequestMessage] = Field(
-        default=[],
-        description="A list of messages to generate completions for."
+        default=[], description="A list of messages to generate completions for."
    )
    max_tokens: int = max_tokens_field
    temperature: float = temperature_field
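
Review note: with the bare defaults replaced by `Field(...)` declarations, the settings become self-documenting in the OpenAPI schema, and the new `ge`/`le` bounds are validated up front. A minimal sketch of what that buys (the model path here is a placeholder, not something from this diff):

```python
import pydantic

from llama_cpp.server.app import Settings

# Hypothetical model path, used only for illustration.
settings = Settings(model="./models/7B/ggml-model.bin", n_ctx=4096)
assert settings.n_ctx == 4096

# The ge=1 bound on n_ctx now rejects nonsensical values at startup
# instead of passing them through to llama.cpp.
try:
    Settings(model="./models/7B/ggml-model.bin", n_ctx=0)
except pydantic.ValidationError as err:
    print(err)
```

Because `Settings` extends pydantic's `BaseSettings`, each field can still be supplied through a case-insensitive environment variable of the same name (e.g. `MODEL`, `N_CTX`).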
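The expanded `temperature_field`/`top_k_field`/`top_p_field`/`repeat_penalty_field` docstrings describe standard sampling heuristics; the server delegates the actual sampling to llama.cpp, but how the knobs interact can be sketched in a few lines of NumPy (illustrative only, not the code path the server runs):

```python
import numpy as np


def apply_repeat_penalty(logits, recent_tokens, repeat_penalty=1.1):
    # Sketch of repeat_penalty over the last_n_tokens_size window: recently
    # seen tokens become less likely. Mirrors llama.cpp's convention of
    # dividing positive logits and multiplying negative ones.
    logits = np.asarray(logits, dtype=np.float64).copy()
    for tok in set(recent_tokens):
        if logits[tok] > 0:
            logits[tok] /= repeat_penalty
        else:
            logits[tok] *= repeat_penalty
    return logits


def sample_next_token(logits, temperature=0.8, top_k=40, top_p=0.95, rng=None):
    # Illustrative combination of the three sampling knobs documented above.
    rng = rng or np.random.default_rng()
    logits = np.asarray(logits, dtype=np.float64)
    if temperature == 0:
        return int(np.argmax(logits))  # greedy: identical output on every run
    logits = logits / temperature
    if top_k > 0:  # keep only the k most probable tokens
        kth_largest = np.sort(logits)[-min(top_k, logits.size)]
        logits = np.where(logits < kth_largest, -np.inf, logits)
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    # Nucleus (top-p): smallest set of tokens, scanned from most to least
    # probable, whose cumulative probability reaches top_p.
    order = np.argsort(probs)[::-1]
    cutoff = int(np.searchsorted(np.cumsum(probs[order]), top_p)) + 1
    keep = order[:cutoff]
    return int(rng.choice(keep, p=probs[keep] / probs[keep].sum()))
```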
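For completeness, the request models map one-to-one onto the JSON body accepted by the existing `create_completion` route. A hypothetical client call (assumes a local server on uvicorn's default port; adjust host and port to your deployment):

```python
import requests

response = requests.post(
    "http://localhost:8000/v1/completions",  # route served by create_completion
    json={
        "prompt": "Q: Name the planets in the solar system. A: ",
        "max_tokens": 16,
        "temperature": 0.8,
        "top_p": 0.95,
        "stop": ["\n", "Q:"],
    },
)
print(response.json()["choices"][0]["text"])
```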