mirror of
https://github.com/abetlen/llama-cpp-python.git
synced 2023-09-07 17:34:22 +03:00
Add support for stream parameter. Closes #1
This commit is contained in:
examples/high_level_api_streaming.py (new file, 20 lines added)
@@ -0,0 +1,20 @@
|
||||
"""Example: stream partial completions from a llama.cpp model.

Passing ``stream=True`` to the high-level ``Llama`` call makes it return
an iterator of incremental completion chunks rather than one finished
response; each chunk is printed here as pretty-printed JSON.
"""
import argparse
import json

from llama_cpp import Llama

# Only the model path is configurable from the command line.
cli = argparse.ArgumentParser()
cli.add_argument("-m", "--model", type=str, default=".//models/...")
cli_args = cli.parse_args()

# Load the model once up front.
model = Llama(model_path=cli_args.model)

# stream=True turns the call into a generator of partial results.
chunks = model(
    "Question: What are the names of the planets in the solar system? Answer: ",
    max_tokens=48,
    stop=["Q:", "\n"],
    stream=True,
)

for chunk in chunks:
    print(json.dumps(chunk, indent=2))
|
||||
Reference in New Issue
Block a user