From 671bbee2c86365617757b3628b9e1f898f247527 Mon Sep 17 00:00:00 2001
From: Ed Addario
Date: Fri, 18 Apr 2025 11:18:29 +0100
Subject: [PATCH 1/6] Add layer remap logic

---
 src/llama-quant.cpp | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 7dc5422763118..3e6e7371c475a 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -21,6 +21,30 @@ static void zeros(std::ofstream & file, size_t n) {
     }
 }
 
+static std::string remap_layer(const std::string & orig_name, const std::vector<int> & prune, std::map<int, std::string> & mapped, int & next_id) {
+    static const std::regex pattern(R"(blk\.(\d+)\.)");
+    if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
+        const int blk = std::stoi(match[1]);
+        std::string new_name = orig_name;
+
+        if (mapped.count(blk)) {
+            // Already mapped, do nothing
+        } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) {
+            mapped[blk] = "X";
+        } else if (blk < prune.front()) {
+            mapped[blk] = std::to_string(blk);
+            next_id = blk + 1;
+        } else {
+            mapped[blk] = std::to_string(next_id);
+            ++next_id;
+        }
+
+        return new_name.replace(match.position(1), match.length(1), mapped[blk]);
+    }
+
+    return orig_name;
+}
+
 struct quantize_state_impl {
     const llama_model & model;
     const llama_model_quantize_params * params;

From 5a805d136058f06daef124bdff638df0f9e3ca9d Mon Sep 17 00:00:00 2001
From: Ed Addario
Date: Sat, 19 Apr 2025 16:04:07 +0100
Subject: [PATCH 2/6] Add tensor pruning logic

---
 src/llama-quant.cpp | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 3e6e7371c475a..6f4fe9409d94e 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -1,5 +1,4 @@
 #include "llama-quant.h"
-
 #include "llama-impl.h"
 #include "llama-model.h"
 #include "llama-model-loader.h"
@@ -14,6 +13,9 @@
 #include <unordered_map>
 
+//static std::vector<int> prune_map = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29};
+static std::vector<int> prune_map = {7};
+
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
     for (size_t i = 0; i < n; ++i) {
         file.write(&zero, 1);
@@ -22,6 +24,10 @@ static std::string remap_layer(const std::string & orig_name, const std::vector
 }
 
 static std::string remap_layer(const std::string & orig_name, const std::vector<int> & prune, std::map<int, std::string> & mapped, int & next_id) {
+    if (prune.empty()) {
+        return orig_name;
+    }
+
     static const std::regex pattern(R"(blk\.(\d+)\.)");
     if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
         const int blk = std::stoi(match[1]);
@@ -39,7 +45,8 @@ static std::string remap_layer(const std::string & orig_name, const std::vector
             ++next_id;
         }
 
-        return new_name.replace(match.position(1), match.length(1), mapped[blk]);
+        std::string name = mapped[blk] == "X" ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]);
+        return name;
     }
 
     return orig_name;
@@ -597,6 +604,10 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
     gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
 
+    // ToDo: Add test for --tensor-prune condition
+    const auto block_count = gguf_get_val_u32(ctx_out.get(), LLM_KV_BLOCK_COUNT) - prune_map.size();
+    gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), block_count);
+
     // Remove split metadata
     gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
     gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
@@ -620,10 +631,28 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         }
     }
 
+    std::map<int, std::string> mapped;
+    int next_blk_id = 0;
+    int pruned_attention_w = 0;
+
     // make a list of weights
     std::vector<const llama_model_loader::llama_tensor_weight *> tensors;
     tensors.reserve(ml.weights_map.size());
     for (const auto & it : ml.weights_map) {
+        // ToDo: Add test for --tensor-prune condition
+        const std::string remapped_name(remap_layer(it.first, prune_map, mapped, next_blk_id));
+        if (remapped_name == "X") {
+            if (it.first.find("attn_v.weight") != std::string::npos ||
+                it.first.find("attn_qkv.weight") != std::string::npos ||
+                it.first.find("attn_kv_b.weight") != std::string::npos) {
+                pruned_attention_w++;
+            }
+            LLAMA_LOG_DEBUG("%s: pruning tensor %s\n", __func__, it.first.c_str());
+            continue;
+        } else if (remapped_name != it.first) {
+            ggml_set_name(it.second.tensor, remapped_name.c_str());
+            //LLAMA_LOG_DEBUG("%s: tensor %s remmaped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor));
+        }
         tensors.push_back(&it.second);
     }
@@ -663,7 +692,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         if (llama_model_has_encoder(&model)) {
             n_attn_layer *= 3;
         }
-        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+        GGML_ASSERT((qs.n_attention_wv == n_attn_layer - pruned_attention_w) && "n_attention_wv is unexpected");
     }
 
     size_t total_size_org = 0;
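The two patches above carry the core renaming scheme: remap_layer() assumes the prune list is sorted in ascending order (the blk < prune.front() branch depends on it); blocks below the first pruned layer keep their ids, pruned blocks collapse to the sentinel "X", and every surviving block after a pruned one slides down so the output stays contiguously numbered. A standalone sketch of the same algorithm (a hypothetical driver, not part of the patch; only the function body is lifted from it):

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <regex>
    #include <string>
    #include <vector>

    // Same algorithm as remap_layer() above (post-PATCH-2 behaviour).
    static std::string remap_layer(const std::string & orig_name, const std::vector<int> & prune,
                                   std::map<int, std::string> & mapped, int & next_id) {
        if (prune.empty()) {
            return orig_name;
        }
        static const std::regex pattern(R"(blk\.(\d+)\.)");
        if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
            const int blk = std::stoi(match[1]);
            std::string new_name = orig_name;
            if (mapped.count(blk)) {
                // this block has already been assigned a new id
            } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) {
                mapped[blk] = "X";                     // marked for pruning
            } else if (blk < prune.front()) {
                mapped[blk] = std::to_string(blk);     // before the first pruned block: unchanged
                next_id = blk + 1;
            } else {
                mapped[blk] = std::to_string(next_id); // after a pruned block: slide down
                ++next_id;
            }
            return mapped[blk] == "X" ? mapped[blk]
                                      : new_name.replace(match.position(1), match.length(1), mapped[blk]);
        }
        return orig_name;
    }

    int main() {
        const std::vector<int> prune = {3, 7}; // must be sorted ascending
        std::map<int, std::string> mapped;
        int next_id = 0;
        for (int i = 0; i < 10; ++i) {
            const std::string name = "blk." + std::to_string(i) + ".attn_q.weight";
            printf("%-22s -> %s\n", name.c_str(), remap_layer(name, prune, mapped, next_id).c_str());
        }
    }

With prune = {3, 7} on a 10-block model, blk.3.* and blk.7.* come back as "X" (and are skipped by the caller), blk.4..blk.6 become blk.3..blk.5, and blk.8..blk.9 become blk.6..blk.7 — eight contiguously numbered blocks remain.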
From 63aa3f334301f773f037d387ba8d8c6a91a1b1bb Mon Sep 17 00:00:00 2001
From: Ed Addario
Date: Sun, 20 Apr 2025 09:46:58 +0100
Subject: [PATCH 3/6] Add imatrix mapping logic

---
 src/llama-quant.cpp | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 6f4fe9409d94e..5a80cc4b535c7 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -14,7 +14,7 @@
 
 //static std::vector<int> prune_map = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29};
-static std::vector<int> prune_map = {7};
+static std::vector<int> prune_map = {3};
 
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
     for (size_t i = 0; i < n; ++i) {
@@ -52,6 +52,28 @@ static std::string remap_layer(const std::string & orig_name, const std::vector
     return orig_name;
 }
 
+static std::string remap_imatrix(const std::string & orig_name, const std::map<int, std::string> & mapped) {
+    if (mapped.empty()) {
+        return orig_name;
+    }
+
+    static const std::regex pattern(R"(blk\.(\d+)\.)");
+    if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
+        const std::string blk(match[1]);
+        std::string new_name = orig_name;
+
+        for (const auto & p : mapped) {
+            if (p.second == blk) {
+                //LLAMA_LOG_DEBUG("(imatrix -> %d) ", p.first);
+                return new_name.replace(match.position(1), match.length(1), std::to_string(p.first));
+            }
+        }
+        GGML_ABORT("\n%s: imatrix mapping error for %s\n", __func__, orig_name.c_str());
+    }
+
+    return orig_name;
+}
+
 struct quantize_state_impl {
     const llama_model & model;
     const llama_model_quantize_params * params;
@@ -882,7 +904,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
 
         const float * imatrix = nullptr;
         if (imatrix_data) {
-            auto it = imatrix_data->find(tensor->name);
+            auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped));
            if (it == imatrix_data->end()) {
                 LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
             } else {
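The imatrix lookup added above runs in the opposite direction from the tensor renaming: importance-matrix files are keyed by the tensor names of the original, unpruned model, while quantization iterates over the already-renumbered output names. remap_imatrix() therefore scans `mapped` by value to recover the original block id, and aborts if an output block has no source, which would indicate a renaming bug. A toy illustration of the reverse lookup (hypothetical values, not part of the patch):

    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
        // what remap_layer() produces for a 6-layer model with layer 3 pruned
        const std::map<int, std::string> mapped = {
            {0, "0"}, {1, "1"}, {2, "2"}, {3, "X"}, {4, "3"}, {5, "4"},
        };
        const std::string out_blk = "3"; // block id as written to the output model
        for (const auto & p : mapped) {
            if (p.second == out_blk) {
                // prints: output blk.3 reads imatrix data of input blk.4
                printf("output blk.%s reads imatrix data of input blk.%d\n", out_blk.c_str(), p.first);
            }
        }
    }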
From c128b28b1679603f408cef9e4bf3222cd7a37d9a Mon Sep 17 00:00:00 2001
From: Ed Addario
Date: Sun, 20 Apr 2025 21:13:11 +0100
Subject: [PATCH 4/6] Add --prune-layers command line option

---
 examples/quantize/quantize.cpp | 46 ++++++++++++++++++++++++++++++----
 include/llama.h                |  1 +
 src/llama-quant.cpp            | 23 +++++++++--------
 3 files changed, 55 insertions(+), 15 deletions(-)

diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 0355311dc5c06..3828164e01da5 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -101,13 +101,11 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
     return false;
 }
 
-// usage:
-//  ./llama-quantize [--allow-requantize] [--leave-output-tensor] [--pure] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
-//
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable);
-    printf("       [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable);
+    printf("       [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+    printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
@@ -118,6 +116,8 @@ static void usage(const char * executable) {
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
     printf("  --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
     printf("      Advanced option to selectively quantize tensors. May be specified multiple times.\n");
+    printf("  --prune-layers L0,L1,L2...: comma-separated list of layer numbers to prune from the model\n");
+    printf("      Advanced option to remove all tensors from the given layers\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -349,6 +349,34 @@ static bool parse_tensor_type(const char * data, std::vector<tensor_quantization
     return true;
 }
 
+static bool parse_layer_prune(const char * data, std::vector<int> & prune_layers) {
+    if (!data) {
+        printf("\n%s: no layer pruning ids provided\n\n", __func__);
+        return false;
+    }
+
+    const auto block_ids = string_split<std::string>(data, ',');
+
+    for (const auto & block_id : block_ids) {
+
+        try {
+            std::stoi(block_id);
+        } catch (...) {
+            printf("%s: invalid layer id '%s'\n\n", __func__, block_id.c_str());
+            return false;
+        }
+
+        int id = std::stoi(block_id);
+        if (id < 0) {
+            printf("\n%s: invalid layer id '%s'\n\n", __func__, block_id.c_str());
+            return false;
+        }
+        prune_layers.emplace_back(id);
+    }
+
+    return true;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
@@ -361,6 +389,7 @@ int main(int argc, char ** argv) {
     std::vector<std::string> included_weights, excluded_weights;
     std::vector<llama_model_kv_override> kv_overrides;
     std::vector<tensor_quantization> tensor_types;
+    std::vector<int> prune_layers;
 
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -387,6 +416,10 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
+            if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
@@ -474,6 +507,9 @@ int main(int argc, char ** argv) {
     if (!tensor_types.empty()) {
         params.tensor_types = &tensor_types;
     }
+    if (!prune_layers.empty()) {
+        params.prune_layers = &prune_layers;
+    }
 
     llama_backend_init();
 
diff --git a/include/llama.h b/include/llama.h
index 5657fbf0a703a..d828a43df551b 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -379,6 +379,7 @@ extern "C" {
         void * imatrix;      // pointer to importance matrix data
         void * kv_overrides; // pointer to vector containing overrides
         void * tensor_types; // pointer to vector containing tensor types
+        void * prune_layers; // pointer to vector containing layer indices to prune
     } llama_model_quantize_params;
 
     typedef struct llama_logit_bias {
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 5a80cc4b535c7..04b2c5fdf26f9 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -13,9 +13,6 @@
 #include <unordered_map>
 
-//static std::vector<int> prune_map = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29};
-static std::vector<int> prune_map = {3};
-
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
     for (size_t i = 0; i < n; ++i) {
         file.write(&zero, 1);
@@ -64,7 +61,7 @@ static std::string remap_imatrix(const std::string & orig_name, const std::map
 
         for (const auto & p : mapped) {
             if (p.second == blk) {
-                //LLAMA_LOG_DEBUG("(imatrix -> %d) ", p.first);
+                LLAMA_LOG_DEBUG("(blk.%d imatrix) ", p.first);
                 return new_name.replace(match.position(1), match.length(1), std::to_string(p.first));
             }
         }
@@ -621,14 +618,20 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     const size_t align = GGUF_DEFAULT_ALIGNMENT;
     gguf_context_ptr ctx_out { gguf_init_empty() };
 
+    std::vector<int> prune_list = {};
+    if (params->prune_layers) {
+        prune_list = *static_cast<const std::vector<int> *>(params->prune_layers);
+    }
+
     // copy the KV pairs from the input file
     gguf_set_kv     (ctx_out.get(), ml.meta.get());
     gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
     gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
 
-    // ToDo: Add test for --tensor-prune condition
-    const auto block_count = gguf_get_val_u32(ctx_out.get(), LLM_KV_BLOCK_COUNT) - prune_map.size();
-    gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), block_count);
+    if (!prune_list.empty()) {
+        const auto block_count = gguf_get_val_u32(ctx_out.get(), LLM_KV_BLOCK_COUNT) - prune_list.size();
+        gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), block_count);
+    }
 
     // Remove split metadata
     gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
     gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
@@ -661,19 +664,18 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     std::vector<const llama_model_loader::llama_tensor_weight *> tensors;
     tensors.reserve(ml.weights_map.size());
     for (const auto & it : ml.weights_map) {
-        // ToDo: Add test for --tensor-prune condition
-        const std::string remapped_name(remap_layer(it.first, prune_map, mapped, next_blk_id));
+        const std::string remapped_name(remap_layer(it.first, prune_list, mapped, next_blk_id));
         if (remapped_name == "X") {
             if (it.first.find("attn_v.weight") != std::string::npos ||
                 it.first.find("attn_qkv.weight") != std::string::npos ||
                 it.first.find("attn_kv_b.weight") != std::string::npos) {
                 pruned_attention_w++;
             }
             LLAMA_LOG_DEBUG("%s: pruning tensor %s\n", __func__, it.first.c_str());
             continue;
         } else if (remapped_name != it.first) {
             ggml_set_name(it.second.tensor, remapped_name.c_str());
-            //LLAMA_LOG_DEBUG("%s: tensor %s remmaped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor));
+            LLAMA_LOG_DEBUG("%s: tensor %s remapped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor));
         }
         tensors.push_back(&it.second);
     }
@@ -1019,6 +1021,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.imatrix      =*/ nullptr,
         /*.kv_overrides =*/ nullptr,
         /*.tensor_type  =*/ nullptr,
+        /*.prune_layers =*/ nullptr
     };
 
     return result;
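llama_model_quantize_params is part of the plain-C public API, so the new prune_layers field follows the same convention as kv_overrides and tensor_types: the std::vector<int> built by parse_layer_prune() crosses the API boundary as a void pointer and is cast back inside llama_model_quantize_impl(). A reduced sketch of that pattern (illustrative names, not the real API):

    #include <cstdio>
    #include <vector>

    struct quantize_params {   // stands in for llama_model_quantize_params
        void * prune_layers;   // pointer to std::vector<int>, or nullptr
    };

    static void quantize_impl(const quantize_params * params) {
        std::vector<int> prune_list = {};
        if (params->prune_layers) {
            // cast back to the concrete vector type on the implementation side
            prune_list = *static_cast<const std::vector<int> *>(params->prune_layers);
        }
        printf("%zu layer(s) to prune\n", prune_list.size());
    }

    int main() {
        std::vector<int> prune_layers = {3, 7};
        quantize_params params = { &prune_layers };
        quantize_impl(&params);
    }

With this patch the hard-coded prune_map is gone and the prune set comes from the command line, e.g. (hypothetical invocation) ./llama-quantize --prune-layers 3,7 model-f32.gguf model-q4.gguf Q4_K_M.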
From 056799f377d4bea0260a14f5485c5d83e223fb49 Mon Sep 17 00:00:00 2001
From: Ed Addario
Date: Wed, 23 Apr 2025 09:31:55 +0100
Subject: [PATCH 5/6] Fix LLM_KV_BLOCK_COUNT retrieval

---
 src/llama-quant.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 04b2c5fdf26f9..d6988250d102f 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -42,8 +42,7 @@ static std::string remap_layer(const std::string & orig_name, const std::vector
             ++next_id;
         }
 
-        std::string name = mapped[blk] == "X" ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]);
-        return name;
+        return mapped[blk] == "X" ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]);
     }
 
     return orig_name;
@@ -629,8 +628,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
 
     if (!prune_list.empty()) {
-        const auto block_count = gguf_get_val_u32(ctx_out.get(), LLM_KV_BLOCK_COUNT) - prune_list.size();
-        gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), block_count);
+        uint32_t block_count = 0;
+        ml.get_key(LLM_KV_BLOCK_COUNT, block_count);
+        gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), block_count - prune_list.size());
     }
 
     // Remove split metadata
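The bug fixed above is easy to miss: gguf_get_val_u32() takes the positional id of a key — the value returned by gguf_find_key() — not an LLM_KV enum constant. Passing LLM_KV_BLOCK_COUNT compiled, because the enum converts silently to an integer, but it indexed an arbitrary KV slot; reading through ml.get_key(LLM_KV_BLOCK_COUNT, block_count) instead resolves the architecture-specific key name (e.g. llama.block_count). A self-contained sketch of the bug class, using a mock store rather than the real gguf API:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    enum llm_kv { LLM_KV_VOCAB_SIZE, LLM_KV_BLOCK_COUNT }; // LLM_KV_BLOCK_COUNT == 1

    struct kv_store { // stand-in for the gguf key/value table
        std::vector<std::pair<std::string, uint32_t>> kv;
        int64_t find_key(const std::string & name) const {
            for (size_t i = 0; i < kv.size(); ++i) {
                if (kv[i].first == name) return (int64_t) i;
            }
            return -1;
        }
        uint32_t get_val_u32(int64_t key_id) const { return kv[(size_t) key_id].second; }
    };

    int main() {
        kv_store ctx;
        ctx.kv = {{"general.quantization_version", 2}, {"general.file_type", 15}, {"llama.block_count", 32}};

        // buggy pattern: the enum silently converts to int and is used as a slot index
        printf("enum as key id : %u\n", ctx.get_val_u32(LLM_KV_BLOCK_COUNT));                 // 15 -- the file_type!
        // fixed pattern: resolve the key name to its id first
        printf("lookup by name : %u\n", ctx.get_val_u32(ctx.find_key("llama.block_count"))); // 32
        return 0;
    }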
From 4ad1f0a6a8ab58cc7621892ca577577f9e3bc9a4 Mon Sep 17 00:00:00 2001
From: Ed Addario
Date: Wed, 23 Apr 2025 16:32:42 +0100
Subject: [PATCH 6/6] Add pruned metadata tag to model

---
 src/llama-quant.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index d6988250d102f..f1f21f340529e 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -628,6 +628,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
 
     if (!prune_list.empty()) {
+        gguf_set_val_bool(ctx_out.get(), "general.pruned", true);
         uint32_t block_count = 0;
         ml.get_key(LLM_KV_BLOCK_COUNT, block_count);
         gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), block_count - prune_list.size());
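The general.pruned flag gives downstream tooling a cheap way to recognize a model whose layers were removed, without having to diff block counts against the original. A sketch of how a consumer might read it back — this assumes the public gguf API from ggml's gguf.h (gguf_init_from_file, gguf_find_key, gguf_get_val_bool, gguf_free) and must be compiled against ggml:

    #include <cstdint>
    #include "gguf.h"

    static bool is_pruned(const char * fname) {
        struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ nullptr };
        struct gguf_context * ctx = gguf_init_from_file(fname, params);
        if (!ctx) {
            return false;
        }
        const int64_t key_id = gguf_find_key(ctx, "general.pruned"); // -1 if the key is absent
        const bool pruned = key_id >= 0 && gguf_get_val_bool(ctx, key_id);
        gguf_free(ctx);
        return pruned;
    }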