diff --git a/llama_cpp/server/__main__.py b/llama_cpp/server/__main__.py
index c3168b4..d19c45a 100644
--- a/llama_cpp/server/__main__.py
+++ b/llama_cpp/server/__main__.py
@@ -28,7 +28,7 @@ class Settings(BaseSettings):
     model: str
     n_ctx: int = 2048
     n_batch: int = 8
-    n_threads: int = int(os.cpu_count() / 2) or 1
+    n_threads: int = ((os.cpu_count() or 2) // 2) or 1
     f16_kv: bool = True
     use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
     embedding: bool = True
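
For context, a minimal sketch of why the default changes (not part of the patch itself): `os.cpu_count()` is documented to return `None` when the CPU count cannot be determined, so the old expression `int(os.cpu_count() / 2)` would raise a `TypeError` in that case, and it also mixed float division with an integer field. The new expression stays in integer arithmetic and falls back to 1 thread.

```python
import os

# Old default: breaks if os.cpu_count() returns None (None / 2 -> TypeError).
# n_threads = int(os.cpu_count() / 2) or 1

# New default: integer-only, and degrades to 1 thread when cpu_count()
# is None (treated as 2 via the `or 2` fallback) or when it is 1.
n_threads = ((os.cpu_count() or 2) // 2) or 1
print(n_threads)
```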