mirror of https://github.com/abetlen/llama-cpp-python.git
Update model paths to be more clear they should point to file
@@ -29,14 +29,14 @@ pip install llama-cpp-python
 
 ```python
 >>> from llama_cpp import Llama
->>> llm = Llama(model_path="models/7B/...")
+>>> llm = Llama(model_path="./models/7B/ggml-model.bin")
 >>> output = llm("Q: Name the planets in the solar system? A: ", max_tokens=32, stop=["Q:", "\n"], echo=True)
 >>> print(output)
 {
   "id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
   "object": "text_completion",
   "created": 1679561337,
-  "model": "models/7B/...",
+  "model": "./models/7B/ggml-model.bin",
   "choices": [
     {
       "text": "Q: Name the planets in the solar system? A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.",
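As an editorial aside on what this hunk documents: below is a minimal sketch of the high-level API with `model_path` pointing at a model file rather than a directory, which is the point of the change. The file name is illustrative; the output handling mirrors the sample JSON shown in the hunk.

```python
from llama_cpp import Llama

# model_path must point to a model *file* (e.g. a ggml .bin),
# not to the containing directory. The exact file name here is an example.
llm = Llama(model_path="./models/7B/ggml-model.bin")

output = llm(
    "Q: Name the planets in the solar system? A: ",
    max_tokens=32,
    stop=["Q:", "\n"],
    echo=True,
)

# The completion text sits under choices[0]["text"], as in the sample output above.
print(output["choices"][0]["text"])
```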
@@ -62,7 +62,7 @@ To install the server package and get started:
 
 ```bash
 pip install llama-cpp-python[server]
-export MODEL=./models/7B
+export MODEL=./models/7B/ggml-model.bin
 python3 -m llama_cpp.server
 ```
 
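For completeness, a small sketch of querying the server once it is running with the `MODEL` file path set as above. It assumes the server's defaults of the time (listening on port 8000 with an OpenAI-style `/v1/completions` endpoint); adjust the URL if your setup differs.

```python
import json
import urllib.request

# Same prompt and sampling parameters as the README example above.
payload = {
    "prompt": "Q: Name the planets in the solar system? A: ",
    "max_tokens": 32,
    "stop": ["Q:", "\n"],
}

# Assumed default address; change host/port if the server was started differently.
req = urllib.request.Request(
    "http://localhost:8000/v1/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)

with urllib.request.urlopen(req) as resp:
    result = json.load(resp)

print(result["choices"][0]["text"])
```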