Commit ed98eea

llama : fix names [no ci]
1 parent b0396b5 commit ed98eea


src/llama.cpp

Lines changed: 12 additions & 12 deletions
@@ -1373,9 +1373,9 @@ struct llm_build_context {
             inp = ggml_graph_node(gf, i);
             if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
                 break;
-            } else {
-                inp = nullptr;
             }
+
+            inp = nullptr;
         }
         GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");

@@ -1431,7 +1431,7 @@ struct llm_build_context {
         return gf;
     }

-    struct ggml_tensor * llm_build_pos_bucket(bool causal) {
+    struct ggml_tensor * build_pos_bucket(bool causal) {
         if (causal) {
             lctx.inp_pos_bucket = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
         } else {
@@ -1444,7 +1444,7 @@ struct llm_build_context {
         return lctx.inp_pos_bucket;
     }

-    struct ggml_tensor * llm_build_pos_bias(struct ggml_tensor * pos_bucket, struct ggml_tensor * attn_rel_b) {
+    struct ggml_tensor * build_pos_bias(struct ggml_tensor * pos_bucket, struct ggml_tensor * attn_rel_b) {
         struct ggml_tensor * pos_bucket_1d = ggml_view_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1], 0);
         cb(pos_bucket_1d, "pos_bucket_1d", -1);

@@ -1463,15 +1463,15 @@ struct llm_build_context {
         return pos_bias;
     }

-    struct ggml_tensor * llm_build_inp_embd_enc() {
+    struct ggml_tensor * build_inp_embd_enc() {
         const int64_t n_embd = hparams.n_embd;
         lctx.inp_embd_enc = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_outputs_enc);
         ggml_set_input(lctx.inp_embd_enc);
         cb(lctx.inp_embd_enc, "embd_enc", -1);
         return lctx.inp_embd_enc;
     }

-    struct ggml_tensor * llm_build_inp_KQ_mask_cross() {
+    struct ggml_tensor * build_inp_KQ_mask_cross() {
         lctx.inp_KQ_mask_cross = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_outputs_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
         ggml_set_input(lctx.inp_KQ_mask_cross);
         cb(lctx.inp_KQ_mask_cross, "KQ_mask_cross", -1);
@@ -6775,7 +6775,7 @@ struct llm_build_context {
         inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);

         GGML_ASSERT(lctx.is_encoding);
-        struct ggml_tensor * pos_bucket_enc = llm_build_pos_bucket(false);
+        struct ggml_tensor * pos_bucket_enc = build_pos_bucket(false);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
         struct ggml_tensor * KQ_mask_enc = build_inp_KQ_mask(false);
@@ -6810,7 +6810,7 @@ struct llm_build_context {
                 cb(kq, "kq", il);

                 struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc;
-                struct ggml_tensor * pos_bias = llm_build_pos_bias(pos_bucket_enc, attn_rel_b);
+                struct ggml_tensor * pos_bias = build_pos_bias(pos_bucket_enc, attn_rel_b);
                 struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias);
                 cb(kq_b, "kq_b", il);

@@ -6909,11 +6909,11 @@ struct llm_build_context {
         GGML_ASSERT(!lctx.is_encoding);
         GGML_ASSERT(n_outputs_enc > 0 && "call llama_encode() first");

-        struct ggml_tensor * embd_enc = llm_build_inp_embd_enc();
-        struct ggml_tensor * pos_bucket_dec = llm_build_pos_bucket(true);
+        struct ggml_tensor * embd_enc = build_inp_embd_enc();
+        struct ggml_tensor * pos_bucket_dec = build_pos_bucket(true);

         struct ggml_tensor * KQ_mask_dec = build_inp_KQ_mask();
-        struct ggml_tensor * KQ_mask_cross = llm_build_inp_KQ_mask_cross();
+        struct ggml_tensor * KQ_mask_cross = build_inp_KQ_mask_cross();

         for (int il = 0; il < n_layer; ++il) {
             struct ggml_tensor * inpSA = inpL;
@@ -6961,7 +6961,7 @@ struct llm_build_context {
                 cb(kq, "kq", il);

                 struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b;
-                struct ggml_tensor * pos_bias = llm_build_pos_bias(pos_bucket_dec, attn_rel_b);
+                struct ggml_tensor * pos_bias = build_pos_bias(pos_bucket_dec, attn_rel_b);
                 struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias);
                 cb(kq_b, "kq_b", il);
