Skip to content

Commit ddd5466

Browse files
ngxsonarthw
authored and committed
llama : remove all_pos_0, all_pos_1, all_seq_id from llama_batch (ggml-org#9745)
* refactor llama_batch_get_one * adapt all examples * fix simple.cpp * fix llama_bench * fix * fix context shifting * free batch before return * use common_batch_add, reuse llama_batch in loop * null terminated seq_id list * fix save-load-state example * fix perplexity * correct token pos in llama_batch_allocr
1 parent 4bd42f7 commit ddd5466

File tree

22 files changed

+205
-118
lines changed

22 files changed

+205
-118
lines changed

common/common.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -955,7 +955,7 @@ struct common_init_result common_init_from_params(common_params & params) {
955955
}
956956

957957
if (llama_model_has_encoder(model)) {
958-
llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
958+
llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
959959
llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
960960
if (decoder_start_token_id == -1) {
961961
decoder_start_token_id = bos;
@@ -964,7 +964,7 @@ struct common_init_result common_init_from_params(common_params & params) {
964964
tmp.push_back(decoder_start_token_id);
965965
}
966966
if (llama_model_has_decoder(model)) {
967-
llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
967+
llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
968968
}
969969
llama_kv_cache_clear(lctx);
970970
llama_synchronize(lctx);

examples/batched-bench/batched-bench.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,6 @@ int main(int argc, char ** argv) {
7474
batch.n_seq_id + i,
7575
batch.seq_id + i,
7676
batch.logits + i,
77-
0, 0, 0, // unused
7877
};
7978

8079
const int ret = llama_decode(ctx, batch_view);

examples/cvector-generator/cvector-generator.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,7 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
339339

340340
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
341341
llama_kv_cache_clear(ctx);
342-
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
342+
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
343343
fprintf(stderr, "%s : failed to eval\n", __func__);
344344
return false;
345345
}

examples/eval-callback/eval-callback.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ static bool run(llama_context * ctx, const common_params & params) {
131131

132132
std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);
133133

134-
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
134+
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
135135
LOG_ERR("%s : failed to eval\n", __func__);
136136
return false;
137137
}

examples/imatrix/imatrix.cpp

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -496,6 +496,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
496496
// clear the KV cache
497497
llama_kv_cache_clear(ctx);
498498

499+
llama_batch batch = llama_batch_init(n_batch, 0, 1);
500+
499501
for (int j = 0; j < num_batches; ++j) {
500502
const int batch_start = start + j * n_batch;
501503
const int batch_size = std::min(end - batch_start, n_batch);
@@ -508,9 +510,14 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
508510
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
509511
}
510512

511-
// TODO: use batch.logits to save computations instead of relying on logits_all == true
512-
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
513+
common_batch_clear(batch);
514+
for (int i = 0; i < batch_size; i++) {
515+
common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
516+
}
517+
518+
if (llama_decode(ctx, batch)) {
513519
LOG_ERR("%s : failed to eval\n", __func__);
520+
llama_batch_free(batch);
514521
return false;
515522
}
516523

@@ -523,6 +530,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
523530
}
524531
}
525532

533+
llama_batch_free(batch);
534+
526535
const auto t_end = std::chrono::high_resolution_clock::now();
527536

528537
if (i == 0) {

examples/infill/infill.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -396,7 +396,7 @@ int main(int argc, char ** argv) {
396396

397397
LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
398398

399-
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
399+
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
400400
LOG_ERR("%s : failed to eval\n", __func__);
401401
return 1;
402402
}

examples/llama-bench/llama-bench.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1428,7 +1428,7 @@ struct sql_printer : public printer {
14281428
}
14291429
};
14301430

1431-
static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
1431+
static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) {
14321432
llama_set_n_threads(ctx, n_threads, n_threads);
14331433

14341434
const llama_model * model = llama_get_model(ctx);
@@ -1444,14 +1444,14 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat
14441444
for (int i = 1; i < n_tokens; i++) {
14451445
tokens[i] = std::rand() % n_vocab;
14461446
}
1447-
llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens, n_past + n_processed, 0));
1447+
llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens));
14481448
n_processed += n_tokens;
14491449
}
14501450

14511451
llama_synchronize(ctx);
14521452
}
14531453

1454-
static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
1454+
static void test_gen(llama_context * ctx, int n_gen, int n_threads) {
14551455
llama_set_n_threads(ctx, n_threads, n_threads);
14561456

14571457
const llama_model * model = llama_get_model(ctx);
@@ -1460,7 +1460,7 @@ static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads)
14601460
llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
14611461

14621462
for (int i = 0; i < n_gen; i++) {
1463-
llama_decode(ctx, llama_batch_get_one(&token, 1, n_past + i, 0));
1463+
llama_decode(ctx, llama_batch_get_one(&token, 1));
14641464
llama_synchronize(ctx);
14651465
token = std::rand() % n_vocab;
14661466
}
@@ -1596,13 +1596,13 @@ int main(int argc, char ** argv) {
15961596
fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count);
15971597
}
15981598
//test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
1599-
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
1599+
test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
16001600
}
16011601
if (t.n_gen > 0) {
16021602
if (params.progress) {
16031603
fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count);
16041604
}
1605-
test_gen(ctx, 1, 0, t.n_threads);
1605+
test_gen(ctx, 1, t.n_threads);
16061606
}
16071607

16081608
for (int i = 0; i < params.reps; i++) {
@@ -1614,13 +1614,13 @@ int main(int argc, char ** argv) {
16141614
if (params.progress) {
16151615
fprintf(stderr, "llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
16161616
}
1617-
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
1617+
test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
16181618
}
16191619
if (t.n_gen > 0) {
16201620
if (params.progress) {
16211621
fprintf(stderr, "llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
16221622
}
1623-
test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
1623+
test_gen(ctx, t.n_gen, t.n_threads);
16241624
}
16251625

16261626
uint64_t t_ns = get_time_ns() - t_start;

examples/llama.android/llama/src/main/cpp/llama-android.cpp

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -283,9 +283,6 @@ Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens,
283283
nullptr,
284284
nullptr,
285285
nullptr,
286-
0,
287-
0,
288-
0,
289286
};
290287

291288
if (embd) {

examples/llava/llava-cli.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
2020
if (n_eval > n_batch) {
2121
n_eval = n_batch;
2222
}
23-
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
23+
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
2424
LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
2525
return false;
2626
}

examples/llava/llava.cpp

Lines changed: 36 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -401,6 +401,39 @@ bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, co
401401
return true;
402402
}
403403

404+
struct llava_embd_batch {
405+
std::vector<llama_pos> pos;
406+
std::vector<int32_t> n_seq_id;
407+
std::vector<llama_seq_id> seq_id_0;
408+
std::vector<llama_seq_id *> seq_ids;
409+
std::vector<int8_t> logits;
410+
llama_batch batch;
411+
llava_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
412+
pos .resize(n_tokens);
413+
n_seq_id.resize(n_tokens);
414+
seq_ids .resize(n_tokens + 1);
415+
logits .resize(n_tokens);
416+
seq_id_0.resize(1);
417+
seq_id_0[0] = seq_id;
418+
seq_ids [n_tokens] = nullptr;
419+
batch = {
420+
/*n_tokens =*/ n_tokens,
421+
/*tokens =*/ nullptr,
422+
/*embd =*/ embd,
423+
/*pos =*/ pos.data(),
424+
/*n_seq_id =*/ n_seq_id.data(),
425+
/*seq_id =*/ seq_ids.data(),
426+
/*logits =*/ logits.data(),
427+
};
428+
for (int i = 0; i < n_tokens; i++) {
429+
batch.pos [i] = pos_0 + i;
430+
batch.n_seq_id[i] = 1;
431+
batch.seq_id [i] = seq_id_0.data();
432+
batch.logits [i] = false;
433+
}
434+
}
435+
};
436+
404437
bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
405438
int n_embd = llama_n_embd(llama_get_model(ctx_llama));
406439

@@ -409,8 +442,9 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
409442
if (n_eval > n_batch) {
410443
n_eval = n_batch;
411444
}
412-
llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
413-
if (llama_decode(ctx_llama, batch)) {
445+
float * embd = image_embed->embed+i*n_embd;
446+
llava_embd_batch llava_batch = llava_embd_batch(embd, n_eval, *n_past, 0);
447+
if (llama_decode(ctx_llama, llava_batch.batch)) {
414448
LOG_ERR("%s : failed to eval\n", __func__);
415449
return false;
416450
}

examples/llava/minicpmv-cli.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
9797
if (n_eval > n_batch) {
9898
n_eval = n_batch;
9999
}
100-
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
100+
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
101101
LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
102102
return false;
103103
}

examples/lookahead/lookahead.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,8 +89,8 @@ int main(int argc, char ** argv) {
8989
const auto t_enc_start = ggml_time_us();
9090

9191
// eval the prompt
92-
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
93-
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
92+
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
93+
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));
9494

9595
for (int s = 1; s < W + G + 1; ++s) {
9696
llama_kv_cache_seq_cp(ctx, 0, s, -1, -1);

examples/lookup/lookup.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,8 +89,8 @@ int main(int argc, char ** argv){
8989

9090
const auto t_enc_start = ggml_time_us();
9191

92-
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
93-
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
92+
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
93+
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));
9494

9595
const auto t_enc_end = ggml_time_us();
9696

examples/main/main.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -528,7 +528,7 @@ int main(int argc, char ** argv) {
528528
int enc_input_size = embd_inp.size();
529529
llama_token * enc_input_buf = embd_inp.data();
530530

531-
if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size, 0, 0))) {
531+
if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size))) {
532532
LOG_ERR("%s : failed to eval\n", __func__);
533533
return 1;
534534
}
@@ -648,7 +648,7 @@ int main(int argc, char ** argv) {
648648

649649
LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
650650

651-
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
651+
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
652652
LOG_ERR("%s : failed to eval\n", __func__);
653653
return 1;
654654
}

examples/parallel/parallel.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,6 @@ int main(int argc, char ** argv) {
308308
batch.n_seq_id + i,
309309
batch.seq_id + i,
310310
batch.logits + i,
311-
0, 0, 0, // unused
312311
};
313312

314313
const int ret = llama_decode(ctx, batch_view);

examples/perplexity/perplexity.cpp

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -408,14 +408,21 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params
408408
// clear the KV cache
409409
llama_kv_cache_clear(ctx);
410410

411+
llama_batch batch = llama_batch_init(n_batch, 0, 1);
412+
411413
for (int j = 0; j < num_batches; ++j) {
412414
const int batch_start = start + j * n_batch;
413415
const int batch_size = std::min(end - batch_start, n_batch);
414416

417+
common_batch_clear(batch);
418+
for (int i = 0; i < batch_size; i++) {
419+
common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
420+
}
421+
415422
//LOG_DBG(" Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
416-
// TODO: use llama_batch.logits instead of relying on logits_all == true
417-
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
423+
if (llama_decode(ctx, batch)) {
418424
//LOG_ERR("%s : failed to eval\n", __func__);
425+
llama_batch_free(batch);
419426
return {tokens, -1, logit_history, prob_history};
420427
}
421428

@@ -435,6 +442,8 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params
435442
}
436443
}
437444

445+
llama_batch_free(batch);
446+
438447
const auto t_end = std::chrono::high_resolution_clock::now();
439448

440449
if (i == 0) {
@@ -704,7 +713,6 @@ static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<
704713
batch.n_seq_id + i,
705714
batch.seq_id + i,
706715
batch.logits + i,
707-
0, 0, 0, // unused
708716
};
709717

710718
const int ret = llama_decode(ctx, batch_view);
@@ -1791,6 +1799,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
17911799
// clear the KV cache
17921800
llama_kv_cache_clear(ctx);
17931801

1802+
llama_batch batch = llama_batch_init(n_batch, 0, 1);
1803+
17941804
for (int j = 0; j < num_batches; ++j) {
17951805
const int batch_start = start + j * n_batch;
17961806
const int batch_size = std::min(end - batch_start, n_batch);
@@ -1803,9 +1813,14 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
18031813
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
18041814
}
18051815

1806-
// TODO: use llama_batch.logits instead of relying on logits_all == true
1807-
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
1816+
common_batch_clear(batch);
1817+
for (int i = 0; i < batch_size; i++) {
1818+
common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
1819+
}
1820+
1821+
if (llama_decode(ctx, batch)) {
18081822
LOG_ERR("%s : failed to eval\n", __func__);
1823+
llama_batch_free(batch);
18091824
return;
18101825
}
18111826

@@ -1818,6 +1833,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
18181833
}
18191834
}
18201835

1836+
llama_batch_free(batch);
1837+
18211838
const auto t_end = std::chrono::high_resolution_clock::now();
18221839

18231840
if (i == 0) {

0 commit comments

Comments
 (0)