Mirror of https://github.com/abetlen/llama-cpp-python.git, synced 2023-09-07 17:34:22 +03:00
Fixed some things and activated cuBLAS
@@ -390,7 +390,7 @@ class Llama:
             top_k=llama_cpp.c_int(top_k),
             top_p=llama_cpp.c_float(top_p),
             temp=llama_cpp.c_float(temp),
-            mirostat=llama_cpp.c_int(mirostat_mode),
+            mirostat_mode=llama_cpp.c_int(mirostat_mode),
             mirostat_mu=llama_cpp.c_float(mirostat_mu),
             mirostat_tau=llama_cpp.c_float(mirostat_tau),
             mirostat_eta=llama_cpp.c_float(mirostat_eta),
@@ -404,7 +404,7 @@ class Llama:
         top_k: int,
         top_p: float,
         temp: float,
-        mirostat: int,
+        mirostat_mode: int,
         mirostat_tau: float,
         mirostat_eta: float,
         mirostat_mu: float,
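The two hunks above align the keyword used at the call site with the parameter name of the sampling wrapper: mirostat becomes mirostat_mode in both places. A minimal sketch of why the mismatch matters, using a hypothetical stand-in for the wrapper rather than the real method:

# Hypothetical stand-in for the sampling wrapper; only the parameter names matter here.
def sample_stub(top_k: int, top_p: float, temp: float, mirostat_mode: int,
                mirostat_tau: float, mirostat_eta: float, mirostat_mu: float) -> int:
    return mirostat_mode

# Before the fix the caller passed mirostat=..., which matches no parameter and
# raises TypeError: unexpected keyword argument 'mirostat'.
# After the fix the keyword matches the signature (values here are illustrative):
sample_stub(top_k=40, top_p=0.95, temp=0.8,
            mirostat_mode=2, mirostat_tau=5.0, mirostat_eta=0.1, mirostat_mu=10.0)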
setup.py | 1 +
@@ -22,6 +22,7 @@ setup(
     extras_require={
         "server": ["uvicorn>=0.21.1", "fastapi>=0.95.0", "sse-starlette>=1.3.3"],
     },
+    cmake_args=['-DLLAMA_CUBLAS=ON'],
     python_requires=">=3.7",
     classifiers=[
         "Programming Language :: Python :: 3",
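The one-line addition above hard-codes -DLLAMA_CUBLAS=ON, so every source build compiles the cuBLAS backend. Passing cmake_args to setup() implies a scikit-build based setup.py; a minimal sketch under that assumption, with the flag made opt-in through a hypothetical environment switch instead of being hard-coded:

# Sketch of a scikit-build setup.py; the package metadata is illustrative and the
# LLAMA_CUBLAS environment switch is hypothetical, not the project's real knob.
import os
from skbuild import setup

cmake_args = []
if os.environ.get("LLAMA_CUBLAS", "0") == "1":
    cmake_args.append("-DLLAMA_CUBLAS=ON")  # forwarded by scikit-build to CMake

setup(
    name="llama_cpp_python",
    version="0.0.0",                        # placeholder version
    packages=["llama_cpp"],
    extras_require={
        "server": ["uvicorn>=0.21.1", "fastapi>=0.95.0", "sse-starlette>=1.3.3"],
    },
    cmake_args=cmake_args,
    python_requires=">=3.7",
)

Keeping the flag opt-in lets users without the CUDA toolkit still produce a CPU-only build, at the cost of one extra step for GPU users.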
vendor/llama.cpp (vendored) | 2 +-
Submodule vendor/llama.cpp updated: 2edbdb0f99...173d0e6419