Commit 8323e23

kv-cache : use (n_swa + n_ubatch) cells for SWA (tmp) [no ci]
1 parent 122769d

4 files changed, +8 −12 lines

src/llama-kv-cache.cpp

Lines changed: 2 additions & 2 deletions
@@ -1685,14 +1685,14 @@ llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa(
                      bool   swa_full,
                  uint32_t   kv_size,
                  uint32_t   n_seq_max,
-                 uint32_t   n_batch,
+                 uint32_t   n_ubatch,
                  uint32_t   n_pad) : hparams(model.hparams) {
     llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
     llama_kv_cache_unified::layer_filter_cb filter_swa  = [&](int32_t il) { return  model.hparams.is_swa(il); };
 
     const uint32_t size_base = kv_size;
 
-          uint32_t size_swa  = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch,  n_pad));
+          uint32_t size_swa  = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_ubatch, n_pad));
 
     // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size
     if (swa_full) {
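
(For reference, a minimal standalone sketch of the sizing computed above. The helper names pad_up and swa_cache_size are illustrative and not part of the commit; pad_up stands in for GGML_PAD, which rounds its argument up to a multiple of the padding. Since decoding feeds the cache at most one micro-batch at a time, n_ubatch extra cells are presumably enough, instead of the full logical n_batch used before.)

    #include <algorithm>
    #include <cstdint>

    // stand-in for GGML_PAD: round x up to a multiple of n
    static uint32_t pad_up(uint32_t x, uint32_t n) {
        return ((x + n - 1) / n) * n;
    }

    // per sequence: up to n_swa cells of sliding-window history, plus room for the
    // n_ubatch tokens of the micro-batch currently being appended; the result is
    // rounded up to a multiple of n_pad and clamped to the base (non-SWA) cache size
    static uint32_t swa_cache_size(uint32_t kv_size, uint32_t n_swa, uint32_t n_seq_max,
                                   uint32_t n_ubatch, uint32_t n_pad) {
        return std::min(kv_size, pad_up(n_swa*n_seq_max + n_ubatch, n_pad));
    }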

src/llama-kv-cache.h

Lines changed: 1 addition & 1 deletion
@@ -244,7 +244,7 @@ class llama_kv_cache_unified_iswa : public llama_kv_cache {
                      bool   swa_full,
                  uint32_t   kv_size,
                  uint32_t   n_seq_max,
-                 uint32_t   n_batch,
+                 uint32_t   n_ubatch,
                  uint32_t   n_pad);
 
     ~llama_kv_cache_unified_iswa() = default;

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
@@ -13230,7 +13230,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
                     params.swa_full,
                     cparams.n_ctx,
                     cparams.n_seq_max,
-                    cparams.n_batch,
+                    cparams.n_ubatch,
                     padding);
         } else {
             GGML_ASSERT(!hparams.is_swa_any());

tools/server/server.cpp

Lines changed: 4 additions & 8 deletions
@@ -2023,11 +2023,6 @@ struct server_context {
             params_base.n_cache_reuse = 0;
             SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled");
         }
-
-        if (!params_base.speculative.model.path.empty()) {
-            SRV_ERR("%s\n", "err: speculative decode is not supported by this context");
-            return false;
-        }
     }
 
     return true;
@@ -3216,9 +3211,10 @@ struct server_context {
                 slot.cache_tokens.clear(); // TODO: not needed, will be cleared later via "keep_first()"
             }
 
-            if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) {
-                if (llama_kv_self_seq_pos_min(ctx, slot.id) > 0) {
-                    SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d\n", slot.n_past, (int) slot.cache_tokens.size());
+            if (slot.n_past > 0 && slot.n_past + 32 < (int) slot.cache_tokens.size()) {
+                const auto pos_min = llama_kv_self_seq_pos_min(ctx, slot.id);
+                if (pos_min > 0) {
+                    SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min);
                     SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n",
                             "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");
                     slot.n_past = 0;
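
(The updated guard, as a minimal standalone sketch. slot_state and effective_n_past are illustrative names; only llama_kv_self_seq_pos_min, the pos_min check, and the 32-token slack come from the hunk above. The idea: if the cache no longer holds the sequence back to position 0 and only a prefix of the cached tokens would be reused, partial reuse is unsafe, so the prompt is re-processed from scratch.)

    #include <cstdint>

    struct slot_state {
        int32_t n_past;   // prefix length the server would like to reuse from the cache
        int32_t n_cached; // number of tokens tracked in slot.cache_tokens
    };

    // pos_min models llama_kv_self_seq_pos_min(ctx, slot.id): the smallest position
    // still present in the KV cache for this sequence; a value > 0 means the head of
    // the sequence was evicted (e.g. by the sliding-window cache)
    static int32_t effective_n_past(const slot_state & s, int32_t pos_min) {
        if (s.n_past > 0 && s.n_past + 32 < s.n_cached && pos_min > 0) {
            return 0; // force full prompt re-processing
        }
        return s.n_past;
    }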
