@@ -4451,6 +4451,9 @@ def __init__(self, dir_model: Path, *args, **kwargs):
         with open(dir_model / "config.json", "r", encoding="utf-8") as f:
             hparams = json.load(f)
         super().__init__(dir_model, *args, hparams=hparams, **kwargs)
+        self.d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
+        self.d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * self.d_model
+        self.n_group = self.hparams.get("n_groups", 1)

     def set_vocab(self):
         vocab_size = self.hparams["vocab_size"]
@@ -4521,10 +4524,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
             # (D is also unsqueezed, but for more straightforward broadcast internally)
             data_torch = data_torch.reshape((*data_torch.shape, 1))
         elif self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_NORM, bid):
-            d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
-            d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
-            n_group = self.hparams.get("n_groups", 1)
-            data_torch = data_torch.reshape((n_group, d_inner // n_group))
+            data_torch = data_torch.reshape((self.n_group, self.d_inner // self.n_group))

         if name.endswith(".A_log"):
             logger.debug("A_log --> A ==> " + new_name)
@@ -4533,6 +4533,107 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         yield (new_name, data_torch)


+@ModelBase.register("BambaForCausalLM")
+class BambaModel(Mamba2Model):
+    """Bamba is a hybrid SSM + Attention model that uses Mamba2 SSM layers"""
+    model_arch = gguf.MODEL_ARCH.BAMBA
+    undo_permute = True
+
+    def __init__(self, *args, **kwargs):
+
+        # Hybrid mamba models use a prefix for the mamba-specific params.
+        # TODO: Extend this if the prefix(es) need to be configurable
+        self.hparam_prefixes = ["mamba"]
+
+        super().__init__(*args, **kwargs)
+
+        # Use Llama conversion for attention
+        self._transformer_model_class: type[TextModel] = LlamaModel
+
+        # Lists of which layers use ssm vs attention
+        self._attn_layers = self.hparams.get("attn_layer_indices", [])
+        if not self._attn_layers:
+            attn_period = self.hparams.get("attn_layer_period")
+            assert attn_period, "Didn't find attn_layer_indices or attn_layer_period"
+            attn_offset = self.hparams.get("attn_layer_offset")
+            assert attn_offset is not None, "No attention layer offset set with attn_layer_period"
+            self._attn_layers = [
+                i for i in range(self.block_count)
+                if i % attn_period == attn_offset
+            ]
+        self._ssm_layers = [
+            i for i in range(self.block_count)
+            if i not in self._attn_layers
+        ]
+
+        # n_group and d_inner are used during reshape_tensors for mamba2
+        self.d_model = self.find_hparam(["hidden_size", "d_model"])
+        self.n_group = self.find_hparam(["n_groups"])
+        self.d_inner = self.find_hparam(["expand"]) * self.d_model
+
+    def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
+        prefixed = []
+        for pfx in self.hparam_prefixes:
+            prefixed.extend(
+                "_".join([pfx, k])
+                for k in keys
+            )
+        keys = list(keys) + prefixed
+        return super().find_hparam(keys, *args, **kwargs)
+
+    def set_gguf_parameters(self):
+
+        ## General Params ##
+        self.gguf_writer.add_embedding_length(self.d_model)
+        self.gguf_writer.add_block_count(self.block_count)
+        self.gguf_writer.add_context_length(self.hparams.get("max_position_embeddings", 0))
+        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
+        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
+
+        ## Mamba mixer params ##
+        self.gguf_writer.add_ssm_conv_kernel(self.find_hparam(["conv_kernel", "d_conv"]))
+        self.gguf_writer.add_ssm_state_size(self.find_hparam(["state_size", "d_state"]))
+        self.gguf_writer.add_ssm_group_count(self.n_group)
+        self.gguf_writer.add_ssm_inner_size(self.d_inner)
+        # NOTE: The mamba_dt_rank is _not_ the right field for how this is used
+        #   in llama.cpp
+        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["n_heads"]))
+
+        ## Attention params ##
+        self.gguf_writer.add_attn_layer_indices(self._attn_layers)
+        self.gguf_writer.add_rope_dimension_count(self.hparams["attn_rotary_emb"])
+        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
+        self.gguf_writer.add_head_count_kv(self.find_hparam(["num_key_value_heads", "n_head_kv"]))
+
+        ## Feed Forward Params ##
+        self.gguf_writer.add_layer_norm_rms_eps(
+            self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
+        )
+
+        ## Validation ##
+        d_head = self.find_hparam(["d_head"], optional=True) or 64
+        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
+        assert self.d_inner % d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {d_head}"
+
+    def modify_tensors(
+        self, data_torch: Tensor, name: str, bid: int | None
+    ) -> Iterable[tuple[str, Tensor]]:
+
+        # Determine whether this is a mamba layer or an attention layer
+        if bid in self._ssm_layers:
+            for mamba_new_name, data_torch in super().modify_tensors(
+                data_torch, name, bid
+            ):
+                yield mamba_new_name, data_torch
+        elif bid in self._attn_layers:
+            for llama_new_name, data_torch in self._transformer_model_class.modify_tensors(
+                self, data_torch, name, bid
+            ):
+                yield llama_new_name, data_torch
+        else:
+            yield self.map_tensor_name(name), data_torch
+
+
 @ModelBase.register("CohereForCausalLM")
 class CommandR2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.COMMAND_R
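Below is a small standalone sketch (not part of the patch) of the two pieces of `BambaModel.__init__` logic worth sanity-checking in review: the prefixed hyperparameter lookup, where plain keys are tried first and then their `mamba_*` variants, and the derivation of attention-layer indices from `attn_layer_period`/`attn_layer_offset`. The config values are made up for illustration.

```python
from typing import Any, Iterable

# Hypothetical Bamba-style config, for illustration only.
hparams = {
    "num_hidden_layers": 32,
    "hidden_size": 4096,
    "mamba_n_groups": 1,
    "mamba_expand": 2,
    "attn_layer_period": 8,
    "attn_layer_offset": 4,
}

HPARAM_PREFIXES = ["mamba"]

def find_hparam(keys: Iterable[str]) -> Any:
    # Mirrors BambaModel.find_hparam: plain keys first, then the
    # prefix-joined variants (e.g. "n_groups" -> "mamba_n_groups").
    keys = list(keys)
    candidates = keys + ["_".join([pfx, k]) for pfx in HPARAM_PREFIXES for k in keys]
    for k in candidates:
        if k in hparams:
            return hparams[k]
    raise KeyError(f"could not find any of {candidates}")

block_count = hparams["num_hidden_layers"]
attn_layers = [
    i for i in range(block_count)
    if i % hparams["attn_layer_period"] == hparams["attn_layer_offset"]
]
ssm_layers = [i for i in range(block_count) if i not in attn_layers]

d_model = find_hparam(["hidden_size", "d_model"])  # 4096, resolved from the plain key
n_group = find_hparam(["n_groups"])                # 1, resolved via "mamba_n_groups"
d_inner = find_hparam(["expand"]) * d_model        # 8192, resolved via "mamba_expand"

print(attn_layers)  # [4, 12, 20, 28]; all other block indices are SSM layers
```

In the patch itself the override forwards `*args`/`**kwargs` to `super().find_hparam()`, so calls such as `find_hparam([...], optional=True)` keep their existing behavior.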