Skip to content

Commit 051372c

Browse files
committed
kv-cache : use (n_swa + n_ubatch) cells for SWA (tmp) [no ci]
1 parent 04d8fb4 commit 051372c

File tree

3 files changed

+4
-4
lines changed

3 files changed

+4
-4
lines changed

src/llama-kv-cache.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1680,14 +1680,14 @@ llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa(
16801680
bool swa_full,
16811681
uint32_t kv_size,
16821682
uint32_t n_seq_max,
1683-
uint32_t n_batch,
1683+
uint32_t n_ubatch,
16841684
uint32_t n_pad) : hparams(model.hparams) {
16851685
llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
16861686
llama_kv_cache_unified::layer_filter_cb filter_swa = [&](int32_t il) { return model.hparams.is_swa(il); };
16871687

16881688
const uint32_t size_base = kv_size;
16891689

1690-
uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch, n_pad));
1690+
uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_ubatch, n_pad));
16911691

16921692
// when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size
16931693
if (swa_full) {

src/llama-kv-cache.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ class llama_kv_cache_unified_iswa : public llama_kv_cache {
260260
bool swa_full,
261261
uint32_t kv_size,
262262
uint32_t n_seq_max,
263-
uint32_t n_batch,
263+
uint32_t n_ubatch,
264264
uint32_t n_pad);
265265

266266
~llama_kv_cache_unified_iswa() = default;

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13230,7 +13230,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
1323013230
params.swa_full,
1323113231
cparams.n_ctx,
1323213232
cparams.n_seq_max,
13233-
cparams.n_batch,
13233+
cparams.n_ubatch,
1323413234
padding);
1323513235
} else {
1323613236
GGML_ASSERT(!hparams.is_swa_any());

0 commit comments

Comments (0)