Commit 2222931

llama : clarify deprecation message (#13794)

1 parent: 9012eb9

1 file changed (+2, -2)

include/llama.h
@@ -612,11 +612,11 @@ extern "C" {
     // Returns the number of tokens in the KV cache (slow, use only for debug)
     // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
     DEPRECATED(LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx),
-        "Use llama_kv_self_seq_pos_max() instead");
+        "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");

     // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
     DEPRECATED(LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx),
-        "Use llama_kv_self_seq_pos_max() instead");
+        "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");

     // Clear the KV cache - both cell info is erased and KV data is zeroed
     LLAMA_API void llama_kv_self_clear(
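For context (not part of this commit): a minimal migration sketch in C for callers of the deprecated functions. It assumes that llama_kv_self_seq_pos_min() and llama_kv_self_seq_pos_max() return -1 for a sequence with no cells in the KV cache, and that the sequence occupies a contiguous range of positions; the helper name kv_tokens_in_seq is hypothetical.

// Hypothetical helper (not from this commit): approximates the deprecated
// llama_kv_self_n_tokens() for a single sequence using the recommended
// replacement functions.
#include "llama.h"

static int32_t kv_tokens_in_seq(struct llama_context * ctx, llama_seq_id seq_id) {
    const llama_pos p_min = llama_kv_self_seq_pos_min(ctx, seq_id);
    const llama_pos p_max = llama_kv_self_seq_pos_max(ctx, seq_id);

    if (p_min < 0 || p_max < 0) {
        return 0; // assumed: -1 means no cells are assigned to this sequence
    }

    // one token per position in the contiguous range [p_min, p_max]
    return (int32_t)(p_max - p_min + 1);
}

Unlike the old per-cache count, this is computed per sequence, so a token shared by multiple sequences is no longer counted multiple times; see the linked issue (#13793) for the rationale behind the deprecation.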
