Mirror of https://github.com/abetlen/llama-cpp-python.git, synced 2023-09-07 17:34:22 +03:00
Format server options
@@ -260,18 +260,18 @@ class CreateCompletionRequest(BaseModel):
     presence_penalty: Optional[float] = presence_penalty_field
     frequency_penalty: Optional[float] = frequency_penalty_field
     logit_bias: Optional[Dict[str, float]] = Field(None)
-    logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None)
+    logprobs: Optional[int] = Field(None)

     # ignored or currently unsupported
     model: Optional[str] = model_field
     n: Optional[int] = 1
-    logprobs: Optional[int] = Field(None)
     best_of: Optional[int] = 1
     user: Optional[str] = Field(None)

     # llama.cpp specific parameters
     top_k: int = top_k_field
     repeat_penalty: float = repeat_penalty_field
+    logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None)

     class Config:
         schema_extra = {
@@ -424,7 +424,6 @@ class CreateChatCompletionRequest(BaseModel):
     presence_penalty: Optional[float] = presence_penalty_field
     frequency_penalty: Optional[float] = frequency_penalty_field
     logit_bias: Optional[Dict[str, float]] = Field(None)
-    logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None)

     # ignored or currently unsupported
     model: Optional[str] = model_field
@@ -434,6 +433,7 @@ class CreateChatCompletionRequest(BaseModel):
     # llama.cpp specific parameters
     top_k: int = top_k_field
     repeat_penalty: float = repeat_penalty_field
+    logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None)

     class Config:
         schema_extra = {
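The change is purely a reordering: logprobs moves up next to the other OpenAI-compatible request fields, and logit_bias_type moves down into the llama.cpp-specific group in both request models. For context, a minimal sketch of how the regrouped fields appear in a request to the server; the URL and port, the token id, and the bias value are illustrative assumptions, not part of this commit:

import requests

# Hypothetical request against a locally running llama-cpp-python server
# (http://localhost:8000 is assumed as the default bind address).
payload = {
    "prompt": "The capital of France is",
    "max_tokens": 16,
    # OpenAI-compatible fields, where logprobs now sits
    "presence_penalty": 0.0,
    "frequency_penalty": 0.0,
    "logprobs": 2,
    # Keys are token ids when logit_bias_type is "input_ids",
    # literal token strings when it is "tokens"
    "logit_bias": {"1234": -100.0},
    # llama.cpp specific parameters, where logit_bias_type now lives
    "top_k": 40,
    "repeat_penalty": 1.1,
    "logit_bias_type": "input_ids",
}

resp = requests.post("http://localhost:8000/v1/completions", json=payload)
print(resp.json()["choices"][0]["text"])

Grouping logit_bias_type with top_k and repeat_penalty makes the schema read the way the API behaves: the first block mirrors the OpenAI spec, the second documents the llama.cpp extensions.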