Skip to content

Commit 04f8641

Browse files
committed
rm redundant llama_batch_ext_set_output_last
1 parent c3dd790 commit 04f8641

File tree

4 files changed

+0
-4
lines changed

4 files changed

+0
-4
lines changed

examples/cvector-generator/cvector-generator.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,6 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
344344
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
345345
llama_kv_self_clear(ctx);
346346
llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(tokens.data(), tokens.size(), 0, 0, true));
347-
llama_batch_ext_set_output_last(batch.get());
348347
if (llama_decode_ext(ctx, batch.get())) {
349348
fprintf(stderr, "%s : failed to eval\n", __func__);
350349
return false;

examples/llama-bench/llama-bench.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1445,7 +1445,6 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_th
14451445
tokens[i] = std::rand() % n_vocab;
14461446
}
14471447
llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(tokens.data(), n_tokens, 0, 0, true));
1448-
llama_batch_ext_set_output_last(batch.get());
14491448
llama_decode_ext(ctx, batch.get());
14501449
n_processed += n_tokens;
14511450
}

examples/simple-chat/simple-chat.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,6 @@ int main(int argc, char ** argv) {
110110
// prepare a batch for the prompt
111111
llama_pos n_past = 0;
112112
llama_batch_ext * batch = llama_batch_ext_init_from_text(prompt_tokens.data(), prompt_tokens.size(), n_past, 0, true);
113-
llama_batch_ext_set_output_last(batch);
114113
n_past += llama_batch_ext_get_n_tokens(batch);
115114

116115
llama_token new_token_id;

examples/simple/simple.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,6 @@ int main(int argc, char ** argv) {
144144
// prepare a batch for the prompt
145145

146146
llama_batch_ext * batch = llama_batch_ext_init_from_text(prompt_tokens.data(), prompt_tokens.size(), 0, 0, true);
147-
llama_batch_ext_set_output_last(batch);
148147

149148
// main loop
150149

0 commit comments

Comments (0)