Skip to content

Commit d7652ca

Browse files
committed
Added an extra newline in convert_hf_to_gguf.py; replaced a tab with spaces in src/llama.cpp.
1 parent f77260c commit d7652ca

File tree

2 files changed

+3
-1
lines changed

2 files changed

+3
-1
lines changed

convert_hf_to_gguf.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1671,6 +1671,7 @@ def prepare_tensors(self):
16711671
if len(experts) > 0:
16721672
raise ValueError(f"Unprocessed experts: {experts}")
16731673

1674+
16741675
@Model.register("DeciLMForCausalLM")
16751676
class DeciModel(Model):
16761677
model_arch = gguf.MODEL_ARCH.DECI
@@ -1848,6 +1849,7 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
18481849
def prepare_tensors(self):
18491850
super().prepare_tensors()
18501851

1852+
18511853
@Model.register("BitnetForCausalLM")
18521854
class BitnetModel(Model):
18531855
model_arch = gguf.MODEL_ARCH.BITNET

src/llama.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11195,7 +11195,7 @@ struct llm_build_context {
1119511195

1119611196
if (n_head == 0) { // attention-free layer of Llama-3_1-Nemotron-51B
1119711197
cur = inpL;
11198-
} else {
11198+
} else {
1119911199
// norm
1120011200
cur = llm_build_norm(ctx0, inpL, hparams,
1120111201
model.layers[il].attn_norm, NULL,

0 commit comments

Comments (0)