mirror of
https://github.com/abetlen/llama-cpp-python.git
synced 2023-09-07 17:34:22 +03:00
Bump version
This commit is contained in:
@@ -7,6 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

### Added

- (llama.cpp) Add full gpu utilisation in CUDA
- (llama.cpp) Add get_vocab
- (llama.cpp) Add low_vram parameter
- (server) Add logit_bias parameter

## [0.1.62]

### Fixed
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "llama_cpp_python"
-version = "0.1.62"
+version = "0.1.63"
 description = "Python bindings for the llama.cpp library"
 authors = ["Andrei Betlen <abetlen@gmail.com>"]
 license = "MIT"
Reference in New Issue
Block a user