whisper : add integer quantization support (#540)

* whisper : add integer quantization support

* examples : add common-ggml + prepare to add "quantize" tool

* whisper : quantization tool ready

* whisper : fix F32 support

* whisper : try to fix shared lib linkage

* wasm : update quantized models to Q5

* bench.wasm : remove "medium" button

* bench.wasm : fix custom model button

* ggml : add Q5_0 and Q5_1 WASM SIMD

* wasm : add quantized models to all WASM examples

* wasm : bump DB version number to 2

* talk-llama : update example to latest llama.cpp

* node : increase test timeout to 10s

* readme : add information for model quantization

* wasm : add links to other examples

Authored by Georgi Gerganov on 2023-04-30 18:51:57 +03:00, committed by GitHub
parent 5fd1bdd7fc, commit 794b162a46
41 changed files with 3183 additions and 1010 deletions

ggml.h (16 changed lines)

@@ -232,6 +232,20 @@ extern "C" {
GGML_TYPE_COUNT,
};
// model file types
enum ggml_ftype {
GGML_FTYPE_UNKNOWN = -1,
GGML_FTYPE_ALL_F32 = 0,
GGML_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
GGML_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
GGML_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
GGML_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
GGML_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
GGML_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
};
// available tensor operations:
enum ggml_op {
GGML_OP_NONE = 0,
@@ -385,6 +399,8 @@ extern "C" {
GGML_API bool ggml_is_quantized(enum ggml_type type);
GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);
// main
GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
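
Below is a minimal usage sketch of the new public API declared in this header. The enum values and the two GGML_API functions come directly from the diff above; the main() wrapper and the printout are illustrative additions and are not part of the commit:

    // Illustrative only: map a model file type to its weight tensor type and
    // check whether that tensor type is quantized.
    #include <stdio.h>

    #include "ggml.h"

    int main(void) {
        // e.g. a whisper model quantized to Q5_0
        enum ggml_ftype ftype = GGML_FTYPE_MOSTLY_Q5_0;

        // tensor type used for the (non-1d) weights of such a model
        enum ggml_type wtype = ggml_ftype_to_ggml_type(ftype);

        printf("ftype %d -> tensor type %d, quantized: %s\n",
               (int) ftype, (int) wtype, ggml_is_quantized(wtype) ? "yes" : "no");

        return 0;
    }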