We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent d9b33fe commit f28af0d — Copy full SHA for f28af0d
gguf-py/gguf/gguf.py
@@ -393,6 +393,7 @@ class TensorNameMap:
393
"layers.{bid}.attention_norm", # llama-pth
394
"encoder.layer.{bid}.attention.output.LayerNorm", # bert
395
"language_model.encoder.layers.{bid}.input_layernorm", # persimmon
396
+ "model.layers.{bid}.ln1", # yi
397
),
398
399
# Attention norm 2
@@ -464,6 +465,7 @@ class TensorNameMap:
464
465
"layers.{bid}.ffn_norm", # llama-pth
466
"encoder.layer.{bid}.output.LayerNorm", # bert
467
"language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
468
+ "model.layers.{bid}.ln2", # yi
469
470
471
# Feed-forward up
0 commit comments