
Commit 4819f9b

kv-cache : revert the (n_swa + n_ubatch) change (for next PR)
ggml-ci
1 parent: d61cf45

4 files changed (+12, -6 lines)

src/llama-kv-cache.cpp (2 additions & 2 deletions)

@@ -1743,14 +1743,14 @@ llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa(
         bool swa_full,
         uint32_t kv_size,
         uint32_t n_seq_max,
-        uint32_t n_ubatch,
+        uint32_t n_batch,
         uint32_t n_pad) : hparams(model.hparams) {
     llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
     llama_kv_cache_unified::layer_filter_cb filter_swa  = [&](int32_t il) { return  model.hparams.is_swa(il); };

     const uint32_t size_base = kv_size;

-    uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_ubatch, n_pad));
+    uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch, n_pad));

     // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size
     if (swa_full) {
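
For reference, the reverted line sizes the SWA part of the iSWA cache from the sliding-window width (hparams.n_swa), the number of parallel sequences (n_seq_max) and, after this revert, the logical batch size (n_batch), rounded up to the padding granularity and capped at the base cache size. Below is a minimal sketch of that arithmetic using the GGML_PAD rounding macro from ggml.h; all parameter values are purely illustrative and not taken from this commit.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Same rounding as the GGML_PAD macro in ggml.h: round x up to the next multiple of n.
#define GGML_PAD(x, n) (((x) + (n) - 1) / (n) * (n))

int main() {
    // illustrative values only (assumptions, not taken from this commit)
    const uint32_t n_swa     = 1000;  // sliding-window width from the model hparams
    const uint32_t n_seq_max = 2;     // max number of parallel sequences
    const uint32_t n_batch   = 2048;  // logical batch size (what this revert switches back to)
    const uint32_t n_pad     = 256;   // padding granularity
    const uint32_t size_base = 8192;  // base cache size (kv_size)

    // same expression as the '+' line in the diff above
    const uint32_t size_swa = std::min(size_base, GGML_PAD(n_swa*n_seq_max + n_batch, n_pad));

    printf("size_swa = %u\n", size_swa); // 1000*2 + 2048 = 4048, padded up to 4096
    return 0;
}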

src/llama-kv-cache.h (1 addition & 1 deletion)

@@ -256,7 +256,7 @@ class llama_kv_cache_unified_iswa : public llama_kv_cache {
             bool swa_full,
             uint32_t kv_size,
             uint32_t n_seq_max,
-            uint32_t n_ubatch,
+            uint32_t n_batch,
             uint32_t n_pad);

     ~llama_kv_cache_unified_iswa() = default;

src/llama-model.cpp (1 addition & 1 deletion)

@@ -13234,7 +13234,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
                 params.swa_full,
                 cparams.n_ctx,
                 cparams.n_seq_max,
-                cparams.n_ubatch,
+                cparams.n_batch,
                 padding);
     } else {
         GGML_ASSERT(!hparams.is_swa_any());

tools/server/server.cpp (8 additions & 2 deletions)

@@ -2017,6 +2017,11 @@ struct server_context {
             params_base.n_cache_reuse = 0;
             SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled");
         }
+
+        if (!params_base.speculative.model.path.empty()) {
+            SRV_ERR("%s\n", "err: speculative decode is not supported by this context");
+            return false;
+        }
     }

     return true;

@@ -3209,7 +3214,7 @@ struct server_context {
             slot.cache_tokens.clear(); // TODO: not needed, will be cleared later via "keep_first()"
         }

-        if (slot.n_past > 0 && slot.n_past + 32 < (int) slot.cache_tokens.size()) {
+        if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) {
             const auto pos_min = llama_kv_self_seq_pos_min(ctx, slot.id);
             if (pos_min > 0) {
                 SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min);

@@ -3423,10 +3428,11 @@ struct server_context {

             // retry with half the batch size to try to find a free slot in the KV cache
             n_batch /= 2;
-            i -= n_batch;

             SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size - try increasing it via the context size or enable defragmentation, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);

+            i -= n_batch;
+
             continue; // continue loop of n_batch
         }


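The last hunk only moves the "i -= n_batch" rewind below the warning, so the log reports the batch position before it is adjusted by the newly halved batch size. Below is a simplified sketch of that control flow, not the server's actual implementation; decode_chunk and the surrounding bookkeeping are stand-ins for llama_decode and the server's batching code.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for a decode call that can fail when the KV cache has no free slot (assumption).
static int decode_chunk(const std::vector<int32_t> & tokens, int32_t i, int32_t n) {
    (void) tokens; (void) i; (void) n;
    return 0; // 0 = success in this sketch
}

static void process_tokens(const std::vector<int32_t> & tokens, int32_t n_batch) {
    for (int32_t i = 0; i < (int32_t) tokens.size(); i += n_batch) {
        const int32_t n = std::min(n_batch, (int32_t) tokens.size() - i);

        const int ret = decode_chunk(tokens, i, n);
        if (ret != 0) {
            if (n_batch == 1) {
                fprintf(stderr, "decode failed at i = %d, giving up\n", i);
                return;
            }

            // retry with half the batch size to try to find a free slot in the KV cache
            n_batch /= 2;

            // the warning is printed before i is rewound, matching the reordered hunk above
            fprintf(stderr, "retrying with smaller batch size, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);

            i -= n_batch; // the loop increment adds the halved n_batch back, so the same span is retried

            continue;
        }
    }
}

int main() {
    const std::vector<int32_t> tokens(10000, 0);
    process_tokens(tokens, 2048);
    return 0;
}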