File tree Expand file tree Collapse file tree 1 file changed +2
-2
lines changed Expand file tree Collapse file tree 1 file changed +2
-2
lines changed Original file line number Diff line number Diff line change @@ -612,11 +612,11 @@ extern "C" {
612
612
// Returns the number of tokens in the KV cache (slow, use only for debug)
613
613
// If a KV cell has multiple sequences assigned to it, it will be counted multiple times
614
614
DEPRECATED (LLAMA_API int32_t llama_kv_self_n_tokens (const struct llama_context * ctx),
615
- "Use llama_kv_self_seq_pos_max() instead");
615
+ "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");
616
616
617
617
// Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
618
618
DEPRECATED (LLAMA_API int32_t llama_kv_self_used_cells (const struct llama_context * ctx),
619
- "Use llama_kv_self_seq_pos_max() instead");
619
+ "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");
620
620
621
621
// Clear the KV cache - both cell info is erased and KV data is zeroed
622
622
LLAMA_API void llama_kv_self_clear (
You can’t perform that action at this time.
0 commit comments