Update model paths to be more clear they should point to file

This commit is contained in:
Andrei Betlen
2023-04-09 22:45:55 -04:00
parent a79d3eb732
commit 196650ccb2
8 changed files with 12 additions and 12 deletions

View File

@@ -4,7 +4,7 @@ To run this example:
```bash
pip install fastapi uvicorn sse-starlette
export MODEL=../models/7B/...
export MODEL=../models/7B/ggml-model.bin
uvicorn fastapi_server_chat:app --reload
```

View File

@@ -3,7 +3,7 @@ import argparse
from llama_cpp import Llama
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, default=".//models/...")
parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-model.bin")
args = parser.parse_args()
llm = Llama(model_path=args.model, embedding=True)

View File

@@ -4,7 +4,7 @@ import argparse
from llama_cpp import Llama
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, default="./models/...")
parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-model.bin")
args = parser.parse_args()
llm = Llama(model_path=args.model)

View File

@@ -4,7 +4,7 @@ import argparse
from llama_cpp import Llama
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, default="./models/...")
parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-model.bin")
args = parser.parse_args()
llm = Llama(model_path=args.model)

View File

@@ -29,7 +29,7 @@ class LlamaLLM(LLM):
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, default="./models/...")
parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-model.bin")
args = parser.parse_args()
# Load the model

View File

@@ -9,7 +9,7 @@ N_THREADS = multiprocessing.cpu_count()
prompt = b"\n\n### Instruction:\nWhat is the capital of France?\n\n### Response:\n"
lparams = llama_cpp.llama_context_default_params()
ctx = llama_cpp.llama_init_from_file(b"models/ggml-alpaca-7b-q4.bin", lparams)
ctx = llama_cpp.llama_init_from_file(b"../models/7B/ggml-model.bin", lparams)
# determine the required inference memory per token:
tmp = [0, 1, 2, 3]