Diff: 1 file changed, +14 insertions, −0 deletions.

Hunk header: @@ -12818,6 +12818,13 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
12818
12818
}
12819
12819
}
12820
12820
12821
+ if (add_special && vocab.special_add_bos != 0 && output[1] == vocab.special_bos_id) {
12822
+ LLAMA_LOG_WARN(
12823
+ "%s: Added a BOS token to the prompt as specified by the model but the prompt "
12824
+ "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
12825
+ "Are you sure this is what you want?\n", __FUNCTION__);
12826
+ }
12827
+
12821
12828
if (add_special && vocab.special_add_eos == 1) {
12822
12829
GGML_ASSERT(vocab.special_eos_id != -1);
12823
12830
output.push_back(vocab.special_eos_id);
@@ -12844,6 +12851,13 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
12844
12851
}
12845
12852
}
12846
12853
12854
+ if (add_special && vocab.special_add_bos != 0 && output[1] == vocab.special_bos_id) {
12855
+ LLAMA_LOG_WARN(
12856
+ "%s: Added a BOS token to the prompt as specified by the model but the prompt "
12857
+ "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
12858
+ "Are you sure this is what you want?\n", __FUNCTION__);
12859
+ }
12860
+
12847
12861
if (add_special && vocab.special_add_eos == 1) {
12848
12862
GGML_ASSERT(vocab.special_add_eos != -1);
12849
12863
output.push_back(vocab.special_eos_id);
0 commit comments.