Mirror of https://github.com/abetlen/llama-cpp-python.git (synced 2023-09-07 17:34:22 +03:00)
Add runtime check to ensure embedding is enabled if trying to generate embeddings
@@ -242,6 +242,11 @@ class Llama:
         """
         assert self.ctx is not None
 
+        if self.params.embedding == False:
+            raise RuntimeError(
+                "Llama model must be created with embedding=True to call this method"
+            )
+
         if self.verbose:
             llama_cpp.llama_reset_timings(self.ctx)
 
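A minimal usage sketch of the behavior this check introduces, assuming the llama-cpp-python API of this period (a Llama constructor that accepts embedding=True and a create_embedding method); the model path below is a placeholder, not part of the commit.

# Sketch only: model path is a placeholder; Llama(embedding=...) and
# create_embedding() are assumed from the surrounding llama-cpp-python API.
from llama_cpp import Llama

# Without embedding=True, the new guard raises RuntimeError up front
# instead of failing deeper inside llama.cpp when embeddings are requested.
llm = Llama(model_path="./models/7B/ggml-model.bin")
try:
    llm.create_embedding("hello world")
except RuntimeError as err:
    print(err)  # Llama model must be created with embedding=True to call this method

# With embedding=True, the call passes the check and proceeds as before.
llm_embed = Llama(model_path="./models/7B/ggml-model.bin", embedding=True)
embedding = llm_embed.create_embedding("hello world")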