mirror of https://github.com/ggerganov/whisper.cpp.git
build : CLBlast support as in llama.cpp (#862)
* ggml : CLBlast support as in llama.cpp
* CMake/Makefile : CLBlast support as in llama.cpp

Building with CLBlast speeds up whisper.cpp ~2x on low-end / older AMD APUs (CPUs with an integrated GPU) such as the A9.

Usage:

```
Makefile:
cd whisper.cpp
WHISPER_CLBLAST=1 make

CMake:
cd whisper.cpp ; mkdir build ; cd build
cmake -DWHISPER_CLBLAST=ON ..
make
```
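For context, a minimal prerequisites-and-build sketch on Debian/Ubuntu. The package names `libclblast-dev` and `ocl-icd-opencl-dev` and the `clinfo` check are assumptions for illustration, not part of this commit; only the `WHISPER_CLBLAST=1 make` and `cmake -DWHISPER_CLBLAST=ON` flows come from the commit message.

```
# Assumed Debian/Ubuntu package names; adjust for your distribution.
sudo apt install libclblast-dev ocl-icd-opencl-dev clinfo

# Verify that an OpenCL platform/device is visible before building.
clinfo | head

# Makefile build with CLBlast enabled (as described in the commit message).
cd whisper.cpp
WHISPER_CLBLAST=1 make

# Alternatively, the CMake flow from the commit message:
# mkdir build && cd build && cmake -DWHISPER_CLBLAST=ON .. && make
```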
Makefile | 9 +++++++++
1 changed file with 9 additions and 0 deletions
```
@@ -171,6 +171,15 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
 	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
 endif
 
+ifdef WHISPER_CLBLAST
+	CFLAGS += -DGGML_USE_CLBLAST
+	LDFLAGS += -lclblast -lOpenCL
+	WHISPER_OBJ += ggml-opencl.o
+
+ggml-opencl.o: ggml-opencl.c ggml-opencl.h
+	$(CC) $(CFLAGS) -c $< -o $@
+endif
+
 ifdef WHISPER_GPROF
 	CFLAGS   += -pg
 	CXXFLAGS += -pg
```
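As a quick check after building, a hedged usage sketch: the `models/download-ggml-model.sh` helper, the `ggml-base.en.bin` model name, and `samples/jfk.wav` are taken from the repository's standard examples and are assumptions here, not part of this diff.

```
# Fetch a small English model (helper script assumed from the repo's models/ directory).
bash ./models/download-ggml-model.sh base.en

# Transcribe the bundled sample; when built with WHISPER_CLBLAST=1, the large
# matrix multiplications should be offloaded through CLBlast/OpenCL on supported iGPUs.
./main -m models/ggml-base.en.bin -f samples/jfk.wav
```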