Skip to content

Commit bafab63

Browse files
Tests: Added an option to accept the actual token stream (#2515)
1 parent 5c33f0b commit bafab63

File tree

3 files changed

+99
-53
lines changed

3 files changed

+99
-53
lines changed

test-suite.html

Lines changed: 21 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -93,8 +93,11 @@ <h2 id="writing-tests-writing-your-test">Writing your test</h2>
9393

9494
<p>Your file is built up of two or three sections, separated by ten or more dashes <code>-</code>, starting at the begin of the line:</p>
9595
<ol>
96-
<li>Your language snippet. The code you want to compile using Prism. (<strong>required</strong>)</li>
97-
<li>The simplified token stream you expect. Needs to be valid JSON. (<strong>required</strong>)</li>
96+
<li>Your language snippet. The code you want to tokenize using Prism. (<strong>required</strong>)</li>
97+
<li>
98+
The simplified token stream you expect. Needs to be valid JSON. (<em>optional</em>) <br>
99+
If there is no token stream defined, the test case will fail unless the <code>--accept</code> flag is present when running the test command (e.g. <code>npm run test:languages -- --accept</code>). If the flag is present and there is no expected token stream, the runner will insert the actual token stream into the test case file, changing it.
100+
</li>
98101
<li>A comment explaining the test case. (<em>optional</em>)</li>
99102
</ol>
100103
<p>The easiest way would be to look at an existing test file:</p>
@@ -114,10 +117,25 @@ <h2 id="writing-tests-writing-your-test">Writing your test</h2>
114117

115118
This is a comment explaining this test case.</code></pre>
116119

120+
<h2 id="writing-tests-the-easy-way">The easy way</h2>
121+
<p>The easy way to create one or multiple new test case(s) is this:</p>
122+
123+
<ol>
124+
<li>Create a new file for a new test case in <code>tests/languages/${language}</code>.</li>
125+
<li>Insert the code you want to test (and nothing more).</li>
126+
<li>Repeat the first two steps for as many test cases as you want.</li>
127+
<li>Run <code>npm run test:languages -- --accept</code>.</li>
128+
<li>Done.</li>
129+
</ol>
130+
131+
<p>This works by making the test runner insert the actual token stream of your test code as the expected token stream. <strong>Carefully check that the inserted token stream is actually what you expect or else the test is meaningless!</strong></p>
132+
133+
<p>Optionally, you can then also add comments to test cases.</p>
134+
117135

118136
<h2 id="writing-tests-explaining-the-simplified-token-stream">Explaining the simplified token stream</h2>
119137

120-
<p>While compiling, Prism transforms your source code into a token stream. This is basically a tree of nested tokens (or arrays, or strings).</p>
138+
<p>While highlighting, Prism transforms your source code into a token stream. This is basically a tree of nested tokens (or arrays, or strings).</p>
121139
<p>As these trees are hard to write by hand, the test runner uses a simplified version of it.</p>
122140
<p>It uses the following rules:</p>
123141
<ul>

tests/helper/test-case.js

Lines changed: 75 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -49,43 +49,66 @@ module.exports = {
4949
*
5050
* @param {string} languageIdentifier
5151
* @param {string} filePath
52+
* @param {boolean} acceptEmpty
5253
*/
53-
runTestCase(languageIdentifier, filePath) {
54+
runTestCase(languageIdentifier, filePath, acceptEmpty) {
5455
const testCase = this.parseTestCaseFile(filePath);
5556
const usedLanguages = this.parseLanguageNames(languageIdentifier);
5657

57-
if (null === testCase) {
58-
throw new Error("Test case file has invalid format (or the provided token stream is invalid JSON), please read the docs.");
59-
}
60-
6158
const Prism = PrismLoader.createInstance(usedLanguages.languages);
6259

6360
// the first language is the main language to highlight
64-
const simplifiedTokenStream = this.simpleTokenize(Prism, testCase.testSource, usedLanguages.mainLanguage);
61+
const simplifiedTokenStream = this.simpleTokenize(Prism, testCase.code, usedLanguages.mainLanguage);
62+
63+
if (testCase.expectedTokenStream === null) {
64+
// the test case doesn't have an expected value
65+
if (!acceptEmpty) {
66+
throw new Error('This test case doesn\'t have an expected token stream.'
67+
+ ' Either add the JSON of a token stream or run \`npm run test:languages -- --accept\`'
68+
+ ' to automatically add the current token stream.');
69+
}
70+
71+
// change the file
72+
const lineEnd = (/\r\n/.test(testCase.code) || !/\n/.test(testCase.code)) ? '\r\n' : '\n';
73+
const separator = "\n\n----------------------------------------------------\n\n";
74+
const pretty = TokenStreamTransformer.prettyprint(simplifiedTokenStream)
75+
.replace(/^( +)/gm, m => {
76+
return "\t".repeat(m.length / 4);
77+
});
78+
79+
let content = testCase.code + separator + pretty;
80+
if (testCase.comment) {
81+
content += separator + testCase.comment;
82+
}
83+
content = content.replace(/\r?\n/g, lineEnd);
6584

66-
const actual = JSON.stringify(simplifiedTokenStream);
67-
const expected = JSON.stringify(testCase.expectedTokenStream);
85+
fs.writeFileSync(filePath, content, "utf-8");
86+
} else {
87+
// there is an expected value
88+
const actual = JSON.stringify(simplifiedTokenStream);
89+
const expected = JSON.stringify(testCase.expectedTokenStream);
6890

69-
if (actual === expected) {
70-
// no difference
71-
return;
72-
}
91+
if (actual === expected) {
92+
// no difference
93+
return;
94+
}
7395

74-
// The index of the first difference between the expected token stream and the actual token stream.
75-
// The index is in the raw expected token stream JSON of the test case.
76-
const diffIndex = translateIndexIgnoreSpaces(testCase.expectedJson, expected, firstDiff(expected, actual));
77-
const expectedJsonLines = testCase.expectedJson.substr(0, diffIndex).split(/\r\n?|\n/g);
78-
const columnNumber = expectedJsonLines.pop().length + 1;
79-
const lineNumber = testCase.expectedLineOffset + expectedJsonLines.length;
80-
81-
const tokenStreamStr = TokenStreamTransformer.prettyprint(simplifiedTokenStream);
82-
const message = "\n\nActual Token Stream:" +
83-
"\n-----------------------------------------\n" +
84-
tokenStreamStr +
85-
"\n-----------------------------------------\n" +
86-
"File: " + filePath + ":" + lineNumber + ":" + columnNumber + "\n\n";
87-
88-
assert.deepEqual(simplifiedTokenStream, testCase.expectedTokenStream, testCase.comment + message);
96+
// The index of the first difference between the expected token stream and the actual token stream.
97+
// The index is in the raw expected token stream JSON of the test case.
98+
const diffIndex = translateIndexIgnoreSpaces(testCase.expectedJson, expected, firstDiff(expected, actual));
99+
const expectedJsonLines = testCase.expectedJson.substr(0, diffIndex).split(/\r\n?|\n/g);
100+
const columnNumber = expectedJsonLines.pop().length + 1;
101+
const lineNumber = testCase.expectedLineOffset + expectedJsonLines.length;
102+
103+
const tokenStreamStr = TokenStreamTransformer.prettyprint(simplifiedTokenStream);
104+
const message = "\n\nActual Token Stream:" +
105+
"\n-----------------------------------------\n" +
106+
tokenStreamStr +
107+
"\n-----------------------------------------\n" +
108+
"File: " + filePath + ":" + lineNumber + ":" + columnNumber + "\n\n";
109+
110+
assert.deepEqual(simplifiedTokenStream, testCase.expectedTokenStream, testCase.comment + message);
111+
}
89112
},
90113

91114
/**
@@ -160,33 +183,36 @@ module.exports = {
160183
*
161184
* @private
162185
* @param {string} filePath
163-
* @returns {{testSource: string, expectedTokenStream: Array<string[]>, comment:string?}|null}
186+
* @returns {ParsedTestCase}
187+
*
188+
* @typedef ParsedTestCase
189+
* @property {string} code
190+
* @property {string} expectedJson
191+
* @property {number} expectedLineOffset
192+
* @property {Array | null} expectedTokenStream
193+
* @property {string} comment
164194
*/
165195
parseTestCaseFile(filePath) {
166196
const testCaseSource = fs.readFileSync(filePath, "utf8");
167-
const testCaseParts = testCaseSource.split(/^-{10,}\w*$/m);
168-
169-
try {
170-
const testCase = {
171-
testSource: testCaseParts[0].trim(),
172-
expectedJson: testCaseParts[1],
173-
expectedLineOffset: testCaseParts[0].split(/\r\n?|\n/g).length,
174-
expectedTokenStream: JSON.parse(testCaseParts[1]),
175-
comment: null
176-
};
177-
178-
// if there are three parts, the third one is the comment
179-
// explaining the test case
180-
if (testCaseParts[2]) {
181-
testCase.comment = testCaseParts[2].trim();
182-
}
197+
const testCaseParts = testCaseSource.split(/^-{10,}[ \t]*$/m);
183198

184-
return testCase;
185-
}
186-
catch (e) {
187-
// the JSON can't be parsed (e.g. it could be empty)
188-
return null;
199+
if (testCaseParts.length > 3) {
200+
throw new Error("Invalid test case format: Too many sections.");
189201
}
202+
203+
const code = testCaseParts[0].trim();
204+
const expected = (testCaseParts[1] || '').trim();
205+
const comment = (testCaseParts[2] || '').trim();
206+
207+
const testCase = {
208+
code,
209+
expectedJson: expected,
210+
expectedLineOffset: code.split(/\r\n?|\n/g).length,
211+
expectedTokenStream: expected ? JSON.parse(expected) : null,
212+
comment
213+
};
214+
215+
return testCase;
190216
},
191217

192218
/**

tests/run.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@ const testSuite =
1212
// load complete test suite
1313
: TestDiscovery.loadAllTests(__dirname + "/languages");
1414

15+
const accept = !!argv.accept;
16+
1517
// define tests for all tests in all languages in the test suite
1618
for (const language in testSuite) {
1719
if (!testSuite.hasOwnProperty(language)) {
@@ -27,7 +29,7 @@ for (const language in testSuite) {
2729

2830
it("– should pass test case '" + fileName + "'", function () {
2931
if (path.extname(filePath) === '.test') {
30-
TestCase.runTestCase(language, filePath);
32+
TestCase.runTestCase(language, filePath, accept);
3133
} else {
3234
TestCase.runTestsWithHooks(language, require(filePath));
3335
}

0 commit comments

Comments
 (0)