
Commit 8be7d67

raise exception when llama_load_model_from_file fails
1 parent 231123e commit 8be7d67

File tree

1 file changed (+4, -1 lines changed)


llama_cpp/llama_cpp.py

Lines changed: 4 additions & 1 deletion
@@ -367,7 +367,10 @@ def llama_backend_free():
 def llama_load_model_from_file(
     path_model: bytes, params: llama_context_params
 ) -> llama_model_p:
-    return _lib.llama_load_model_from_file(path_model, params)
+    result = _lib.llama_load_model_from_file(path_model, params)
+    if result is None:
+        raise Exception(f"Failed to load model from {path_model}")
+    return result
 
 
 _lib.llama_load_model_from_file.argtypes = [c_char_p, llama_context_params]
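
For context, a minimal usage sketch of how the change surfaces to callers (not part of the commit): the model path below is hypothetical, and llama_context_default_params is assumed to be the usual params helper exposed by the same bindings. Before this commit a bad path came back as a NULL model pointer that only failed later; after it, the loader raises immediately.

# Usage sketch, not from the commit; path is hypothetical.
from llama_cpp import llama_cpp

# Assumed helper from the same bindings for default context params.
params = llama_cpp.llama_context_default_params()

try:
    model = llama_cpp.llama_load_model_from_file(
        b"/path/to/missing-model.bin",  # hypothetical, nonexistent path
        params,
    )
except Exception as err:
    # Previously this call returned a NULL pointer and the failure only
    # surfaced later; with this commit a failed load raises here instead.
    print(f"model load failed: {err}")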
