Mirror of https://github.com/abetlen/llama-cpp-python.git, synced 2023-09-07 17:34:22 +03:00
Remove async from function signature to avoid blocking the server
@@ -196,7 +196,7 @@ CreateChatCompletionResponse = create_model_from_typeddict(llama_cpp.ChatCompletion)
     "/v1/chat/completions",
     response_model=CreateChatCompletionResponse,
 )
-async def create_chat_completion(
+def create_chat_completion(
     request: CreateChatCompletionRequest,
 ) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]:
     completion_or_chunks = llama.create_chat_completion(
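Why dropping `async` helps (a sketch, not part of the commit): FastAPI runs plain `def` endpoints in a worker threadpool, while `async def` endpoints run directly on the event loop. Because `llama.create_chat_completion(...)` is a blocking, CPU-bound call, keeping it inside an `async def` handler would stall the event loop, and with it every other in-flight request. A minimal illustration, assuming stock FastAPI behavior; the route names here are hypothetical and not from the repo:

import time

from fastapi import FastAPI

app = FastAPI()

@app.get("/blocking-async")
async def blocking_async():
    # BAD: time.sleep() blocks the event loop, so ALL requests stall
    # until this handler returns.
    time.sleep(5)
    return {"done": True}

@app.get("/blocking-sync")
def blocking_sync():
    # OK: FastAPI dispatches plain `def` endpoints to a threadpool,
    # so the sleep only occupies one worker thread while other
    # requests keep being served.
    time.sleep(5)
    return {"done": True}

The same trade-off applies to the diff above: with a plain `def` signature, the blocking llama.cpp inference call is moved off the event loop onto a worker thread, so the server stays responsive while a completion is being generated.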