Skip to content

Commit 6d67be6

Browse files
committed
Improve error message when model file is missing
Original Pull Request: abetlen/llama-cpp-python#2041
1 parent fa3985f · commit 6d67be6

1 file changed

Lines changed: 16 additions & 7 deletions

File tree

llama_cpp/llama.py

Lines changed: 16 additions & 7 deletions
```diff
@@ -383,15 +383,24 @@ def __init__(
         if not os.path.exists(model_path):
             raise ValueError(f"Model path does not exist: {model_path}")

-        self._model = self._stack.enter_context(
-            contextlib.closing(
-                internals.LlamaModel(
-                    path_model=self.model_path,
-                    params=self.model_params,
-                    verbose=self.verbose,
+        try:
+            self._model = self._stack.enter_context(
+                contextlib.closing(
+                    internals.LlamaModel(
+                        path_model=self.model_path,
+                        params=self.model_params,
+                        verbose=self.verbose,
+                    )
                 )
             )
-        )
+        except RuntimeError as e:
+            if "No such file or directory" in str(e):
+                raise FileNotFoundError(
+                    f"Model file not found at '{self.model_path}'. "
+                    "Make sure the .gguf model file exists at the given path."
+                ) from e
+            else:
+                raise

         # Override tokenizer
         self.tokenizer_ = tokenizer or LlamaTokenizer(self)
```

Comments (0)