Commit 5cad62b

tests : write a Python tokenizer test (wip)
1 parent a2ca4e9

File tree

2 files changed: +39, -19 lines
  tests/test-tokenizer-0.cpp
  tests/test-tokenizer-0.py


tests/test-tokenizer-0.cpp

Lines changed: 21 additions & 19 deletions
@@ -16,36 +16,38 @@ static std::string unescape_whitespace(llama_context* ctx, const std::vector<lla
 
 static const std::map<std::string, std::vector<llama_token>> & k_tests() {
     static std::map<std::string, std::vector<llama_token>> _k_tests = {
-        { " ",                  {1, 259, }, },
-        { "  ",                 { 1, 1678, }, },
-        { "   ",                { 1, 268, }, },
-        { "\t",                 { 1, 29871, 12, }, },
-        { "\n",                 { 1, 29871, 13, }, },
-        { "\t\n",               { 1, 29871, 12, 13, }, },
+        { " ",                  { 1, 259, }, },
+        { "  ",                 { 1, 1678, }, },
+        { "   ",                { 1, 268, }, },
+        { "\t",                 { 1, 29871, 12, }, },
+        { "\n",                 { 1, 29871, 13, }, },
+        { "\t\n",               { 1, 29871, 12, 13, }, },
         { "Hello world",        { 1, 15043, 3186, }, },
         { " Hello world",       { 1, 29871, 15043, 3186, }, },
         { "Hello World",        { 1, 15043, 2787, }, },
         { " Hello World",       { 1, 29871, 15043, 2787, }, },
         { " Hello World!",      { 1, 29871, 15043, 2787, 29991, }, },
+        { "Hello, world!",      { 1, 15043, 29892, 3186, 29991, }, },
+        { " Hello, world!",     { 1, 29871, 15043, 29892, 3186, 29991, }, },
         { " this is 🦙.cpp",    { 1, 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
         { "w048 7tuijk dsdfhu", { 1, 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
         { "нещо на Български",  { 1, 1538, 4851, 665, 1386, 29713, 1305, }, },
         { "កាន់តែពិសេសអាចខលចេញ", { 1, 29871, 31849, 31324, 31934, 228, 162, 142, 228, 161,
-                                  146, 228, 162, 133, 228, 161, 153, 228, 161, 186,
-                                  31708, 228, 162, 132, 31708, 228, 161, 165, 31324, 228,
-                                  161, 136, 228, 161, 132, 228, 161, 158, 228, 161,
-                                  136, 228, 162, 132, 228, 161, 140, }, },
+                                  146, 228, 162, 133, 228, 161, 153, 228, 161, 186,
+                                  31708, 228, 162, 132, 31708, 228, 161, 165, 31324, 228,
+                                  161, 136, 228, 161, 132, 228, 161, 158, 228, 161,
+                                  136, 228, 162, 132, 228, 161, 140, }, },
         { "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
                                 { 1, 29871, 243, 162, 157, 131, 313, 8945, 29897, 29871,
-                                  243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598,
-                                  313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681,
-                                  313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
-        { "Hello",              { 1, 15043 }, },
-        { " Hello",             { 1, 29871, 15043 }, },
-        { "  Hello",            { 1, 259, 15043 }, },
-        { "   Hello",           { 1, 1678, 15043 }, },
-        { "    Hello",          { 1, 268, 15043 }, },
-        { "    Hello\n    Hello", { 1, 268, 15043, 13, 1678, 15043 }, },
+                                  243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598,
+                                  313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681,
+                                  313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
+        { "Hello",              { 1, 15043, }, },
+        { " Hello",             { 1, 29871, 15043, }, },
+        { "  Hello",            { 1, 259, 15043, }, },
+        { "   Hello",           { 1, 1678, 15043, }, },
+        { "    Hello",          { 1, 268, 15043, }, },
+        { "    Hello\n    Hello", { 1, 268, 15043, 13, 1678, 15043, }, },
     };
 
     return _k_tests;
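
Since the expected ids in k_tests() come from the LLaMA SentencePiece model, they can be cross-checked from Python with the sentencepiece package, which is what the new script in this commit starts to do. A minimal sketch follows, assuming a LLaMA tokenizer.model is available; the model path is illustrative, and the expected ids are copied from the "Hello, world!" entries added above.

# Sketch: cross-check a few k_tests() expectations against the reference SentencePiece tokenizer.
# The model path below is an assumption; point it at a real LLaMA 'tokenizer.model'.
from sentencepiece import SentencePieceProcessor

sp = SentencePieceProcessor('models/tokenizer.model')

# Expected ids copied from the new "Hello, world!" entries in k_tests() (id 1 is BOS).
expected = {
    "Hello, world!":  [1, 15043, 29892, 3186, 29991],
    " Hello, world!": [1, 29871, 15043, 29892, 3186, 29991],
}

for text, ids in expected.items():
    got = sp.encode(text, add_bos=True)
    print(f"{text!r}: got={got} expected={ids} match={got == ids}")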

tests/test-tokenizer-0.py

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+import os
+import sys
+import argparse
+
+from sentencepiece import SentencePieceProcessor
+
+parser = argparse.ArgumentParser()
+parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
+args = parser.parse_args()
+
+dir_tokenizer = args.dir_tokenizer
+
+tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
+
+text = 'Hello, world!'
+print(text)
+print(tokenizer.encode(text, add_bos=True))
+print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
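
Running the new script against a directory holding a LLaMA tokenizer.model would look roughly like the following; the directory path is illustrative, and the printed ids follow the "Hello, world!" entry added to the C++ tests above (SentencePiece decode drops the BOS id when reconstructing the text).

$ python3 tests/test-tokenizer-0.py /path/to/llama-model-dir
Hello, world!
[1, 15043, 29892, 3186, 29991]
Hello, world!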
