Mirror of https://github.com/abetlen/llama-cpp-python.git
Update model paths to be more clear they should point to file
@@ -9,7 +9,7 @@ N_THREADS = multiprocessing.cpu_count()
 prompt = b"\n\n### Instruction:\nWhat is the capital of France?\n\n### Response:\n"

 lparams = llama_cpp.llama_context_default_params()
-ctx = llama_cpp.llama_init_from_file(b"models/ggml-alpaca-7b-q4.bin", lparams)
+ctx = llama_cpp.llama_init_from_file(b"../models/7B/ggml-model.bin", lparams)

 # determine the required inference memory per token:
 tmp = [0, 1, 2, 3]
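For context, a minimal sketch of the startup sequence this hunk touches, with a guard that makes the commit's intent explicit: the path must point to the model file itself, not to a containing directory. The MODEL_PATH name and the os.path.isfile check are illustrative additions, not part of the diff; only llama_context_default_params and llama_init_from_file come from the hunk above.

import os
import multiprocessing

import llama_cpp

N_THREADS = multiprocessing.cpu_count()

# MODEL_PATH is an illustrative name, not in the diff; point it at the
# .bin model file itself, not at the directory that contains it.
MODEL_PATH = b"../models/7B/ggml-model.bin"

if not os.path.isfile(MODEL_PATH):
    raise FileNotFoundError(
        f"{MODEL_PATH!r} is not a file; pass the model file, not its folder"
    )

# Build default context parameters, then load the model from the file path.
lparams = llama_cpp.llama_context_default_params()
ctx = llama_cpp.llama_init_from_file(MODEL_PATH, lparams)

Failing fast with an explicit Python-level error is the benefit of a path that clearly names a file: handing a directory or a missing path straight to the loader would otherwise surface only as an opaque failure from the native library.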