
Commit cc4a954

Authored by ggerganov and compilade
llama : fix attention layer count sanity check (#6550)
* llama : fix attention layer count sanity check

* llama : fix parentheses in attention layer count sanity check

  There was otherwise a warning when compiling.

Co-authored-by: Francis Couture-Harpin <git@compilade.net>
1 parent: cecd8d3 · commit: cc4a954

File tree: 1 file changed, +7 / -2 lines


llama.cpp

Lines changed: 7 additions & 2 deletions
@@ -13468,7 +13468,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         const std::string name = ggml_get_name(meta);

         // TODO: avoid hardcoded tensor names - use the TN_* constants
-        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
+        if (name.find("attn_v.weight") != std::string::npos ||
+            name.find("attn_qkv.weight") != std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
@@ -13478,7 +13479,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;

     // sanity checks
-    GGML_ASSERT(qs.n_attention_wv == (int)model.hparams.n_layer && "n_attention_wv != n_layer is unexpected");
+    //
+    //  - qs.n_attention_wv == 0 for Mamba models
+    //  - qs.n_attention_wv == model.hparams.n_layer for Transformer models
+    //
+    GGML_ASSERT((qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer) && "n_attention_wv is unexpected");

     size_t total_size_org = 0;
     size_t total_size_new = 0;
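For context on the second part of the fix (the parentheses): the idiom cond && "message" used with GGML_ASSERT attaches a readable string to the check, but once cond contains an ||, GCC and Clang warn under -Wparentheses because && binds tighter than ||. Wrapping the || expression in its own parentheses keeps the intended grouping and compiles cleanly. Below is a minimal standalone sketch of the pattern, using the standard assert as a stand-in for GGML_ASSERT and hypothetical counts chosen only for illustration:

#include <cassert>

int main() {
    // Hypothetical values for illustration:
    // 0 would correspond to a recurrent (Mamba-style) model,
    // n_layer to a Transformer model where every layer has attention.
    int n_attention_wv = 0;
    int n_layer        = 32;

    // Without the inner parentheses the expression parses as
    //   n_attention_wv == 0 || (n_attention_wv == n_layer && "...")
    // because && binds tighter than ||, which is what -Wparentheses flags.
    // The explicit grouping below matches the intent and avoids the warning.
    assert((n_attention_wv == 0 || n_attention_wv == n_layer) && "n_attention_wv is unexpected");

    return 0;
}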
