Skip to content

Commit 50d9a51

Browse files
committed
context : fix optimization logic after the refactor
ggml-ci
1 parent 8323e23 commit 50d9a51

File tree

1 file changed

+3
-4
lines changed

1 file changed

+3
-4
lines changed

src/llama-context.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1952,8 +1952,7 @@ void llama_context::opt_epoch_iter(
 
         int64_t n_outputs_all = n_tokens_all;
 
-        //llama_sbatch sbatch = kv_self->sbatch_init(batch, /*logits_all =*/ true);
-        auto decode_state = kv_self->init(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ n_outputs_all == n_tokens_all);
+        auto decode_state = kv_self->init(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ true);
         if (!decode_state) {
             LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
             break;
@@ -1969,8 +1968,6 @@ void llama_context::opt_epoch_iter(
         while (const auto * ubatch_ptr = decode_state->next()) {
             const auto & ubatch = *ubatch_ptr;
 
-            pos_batch += ubatch.n_tokens;
-
             n_outputs = ubatch.n_tokens;
 
             auto * gf = graph_init();
@@ -2006,6 +2003,8 @@ void llama_context::opt_epoch_iter(
                 callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start);
             }
             ggml_free(ctx_compute_opt);
+
+            pos_batch += ubatch.n_tokens;
         }
     }
 }

0 commit comments

Comments
 (0)