Mirror of https://github.com/abetlen/llama-cpp-python.git (synced 2023-09-07 17:34:22 +03:00)
Add additional verbose logs for cache
llama_cpp/server/app.py
@@ -119,8 +119,12 @@ def create_app(settings: Optional[Settings] = None):
     )
     if settings.cache:
         if settings.cache_type == "disk":
+            if settings.verbose:
+                print(f"Using disk cache with size {settings.cache_size}")
             cache = llama_cpp.LlamaDiskCache(capacity_bytes=settings.cache_size)
         else:
+            if settings.verbose:
+                print(f"Using ram cache with size {settings.cache_size}")
             cache = llama_cpp.LlamaRAMCache(capacity_bytes=settings.cache_size)
 
         cache = llama_cpp.LlamaCache(capacity_bytes=settings.cache_size)
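For context, a minimal sketch, not part of this commit, of attaching one of these cache types to a Llama instance directly through the llama_cpp API; the model path and capacity below are hypothetical:

    import llama_cpp

    # Load a model (path is illustrative).
    llama = llama_cpp.Llama(model_path="./models/model.bin")

    # Mirror the server's "disk" branch: cache completion state on disk,
    # capped at roughly 2 GiB (capacity is illustrative).
    cache = llama_cpp.LlamaDiskCache(capacity_bytes=2 << 30)
    llama.set_cache(cache)

    # The server's else-branch equivalent would be an in-memory cache:
    # cache = llama_cpp.LlamaRAMCache(capacity_bytes=2 << 30)

When the server itself runs with settings.cache enabled, the lines added by this commit print either "Using disk cache with size ..." or "Using ram cache with size ..." at startup, but only if settings.verbose is set.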