Mirror of https://github.com/abetlen/llama-cpp-python.git, synced 2023-09-07 17:34:22 +03:00
Black formatting
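The diff below applies Black's default style to the example FastAPI server. Two of Black's documented rules drive most of the changes: top-level definitions are separated by two blank lines, and the last element of a literal that stays split across lines gets a trailing comma (the "magic trailing comma"). A minimal before/after sketch of the trailing-comma rule, on a throwaway dict rather than this repo's code:

    # Before Black: multi-line dict, no comma after the last item.
    stops = {
        "stop": ["\n", "###"]
    }

    # After Black: a trailing comma, so appending an item later only touches one line.
    stops = {
        "stop": ["\n", "###"],
    }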
@@ -5,9 +5,11 @@ from llama_cpp import Llama
 from fastapi import FastAPI
 from pydantic import BaseModel, BaseSettings, Field
 
+
 class Settings(BaseSettings):
     model: str
 
+
 app = FastAPI(
     title="🦙 llama.cpp Python API",
     version="0.0.1",
@@ -15,6 +17,7 @@ app = FastAPI(
 settings = Settings()
 llama = Llama(settings.model)
 
+
 class CompletionRequest(BaseModel):
     prompt: str
     suffix: Optional[str] = Field(None)
@@ -31,12 +34,11 @@ class CompletionRequest(BaseModel):
         schema_extra = {
             "example": {
                 "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
-                "stop": ["\n", "###"]
+                "stop": ["\n", "###"],
             }
         }
 
 
-
 @app.post("/v1/completions")
 def completions(request: CompletionRequest):
-    return llama(**request.dict())
+    return llama(**request.dict())
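For context, the endpoint touched by the last hunk accepts an OpenAI-style completion request. A minimal client sketch follows, reusing the example payload from the model's schema_extra; the host and port are assumptions, since the diff does not show how the server is launched (typically uvicorn for a FastAPI app):

    # Hypothetical client for the /v1/completions endpoint above.
    # http://localhost:8000 is an assumed address, not taken from the diff.
    import requests

    response = requests.post(
        "http://localhost:8000/v1/completions",
        json={
            "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
            "stop": ["\n", "###"],
        },
    )
    print(response.json())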