@@ -12,13 +12,13 @@ const maxTime = 5;
 // The minimum sample size required to perform statistical analysis.
 const minSamples = 5;
 
-async function runBenchmarks() {
+function runBenchmarks() {
   // Get the revisions and make things happen!
   const { benchmarks, revisions } = getArguments(process.argv.slice(2));
   const benchmarkProjects = prepareBenchmarkProjects(revisions);
 
   for (const benchmark of benchmarks) {
-    await runBenchmark(benchmark, benchmarkProjects);
+    runBenchmark(benchmark, benchmarkProjects);
   }
 }
 
@@ -113,14 +113,14 @@ function prepareBenchmarkProjects(
   }
 }
 
-async function collectSamples(modulePath: string) {
+function collectSamples(modulePath: string) {
   let numOfConsequentlyRejectedSamples = 0;
   const samples = [];
 
   // If time permits, increase sample size to reduce the margin of error.
   const start = Date.now();
   while (samples.length < minSamples || (Date.now() - start) / 1e3 < maxTime) {
-    const sample = await sampleModule(modulePath);
+    const sample = sampleModule(modulePath);
 
     if (sample.involuntaryContextSwitches > 0) {
       numOfConsequentlyRejectedSamples++;
@@ -278,7 +278,7 @@ function maxBy<T>(array: ReadonlyArray<T>, fn: (obj: T) => number) {
 }
 
 // Prepare all revisions and run benchmarks matching a pattern against them.
-async function runBenchmark(
+function runBenchmark(
   benchmark: string,
   benchmarkProjects: ReadonlyArray<BenchmarkProject>,
 ) {
@@ -288,17 +288,17 @@ async function runBenchmark(
     const modulePath = path.join(projectPath, benchmark);
 
     if (i === 0) {
-      const { name } = await sampleModule(modulePath);
+      const { name } = sampleModule(modulePath);
       console.log('⏱ ' + name);
     }
 
     try {
-      const samples = await collectSamples(modulePath);
+      const samples = collectSamples(modulePath);
 
       results.push(computeStats(revision, samples));
       process.stdout.write('  ' + cyan(i + 1) + ' tests completed.\u000D');
     } catch (error) {
-      console.log('  ' + revision + ': ' + red(String(error)));
+      console.log('  ' + revision + ': ' + red(error.message));
     }
   }
   console.log('\n');
@@ -372,11 +372,9 @@ interface BenchmarkSample {
   involuntaryContextSwitches: number;
 }
 
-function sampleModule(modulePath: string): Promise<BenchmarkSample> {
+function sampleModule(modulePath: string): BenchmarkSample {
   const sampleCode = `
-    import assert from 'node:assert';
-
-    assert(process.send);
+    import fs from 'node:fs';
 
     import { benchmark } from '${modulePath}';
 
@@ -399,53 +397,45 @@ function sampleModule(modulePath: string): Promise<BenchmarkSample> {
     const timeDiff = Number(process.hrtime.bigint() - startTime);
     const resourcesEnd = process.resourceUsage();
 
-    process.send({
+    const result = {
       name: benchmark.name,
       clocked: timeDiff / benchmark.count,
       memUsed: (process.memoryUsage().heapUsed - memBaseline) / benchmark.count,
       involuntaryContextSwitches:
         resourcesEnd.involuntaryContextSwitches - resourcesStart.involuntaryContextSwitches,
-    });
+    };
+    fs.writeFileSync(3, JSON.stringify(result));
   `;
 
-  return new Promise((resolve, reject) => {
-    const child = cp.spawn(
-      process.execPath,
-      [
-        // V8 flags
-        '--predictable',
-        '--no-concurrent-sweeping',
-        '--no-scavenge-task',
-        '--min-semi-space-size=1024', // 1GB
-        '--max-semi-space-size=1024', // 1GB
-        '--trace-gc', // no gc calls should happen during benchmark, so trace them
-
-        // Node.js flags
-        '--input-type=module',
-        '--eval',
-        sampleCode,
-      ],
-      {
-        stdio: ['inherit', 'inherit', 'inherit', 'ipc'],
-        env: { NODE_ENV: 'production' },
-      },
-    );
+  const result = cp.spawnSync(
+    process.execPath,
+    [
+      // V8 flags
+      '--predictable',
+      '--no-concurrent-sweeping',
+      '--no-scavenge-task',
+      '--min-semi-space-size=1024', // 1GB
+      '--max-semi-space-size=1024', // 1GB
+      '--trace-gc', // no gc calls should happen during benchmark, so trace them
+
+      // Node.js flags
+      '--input-type=module',
+      '--eval',
+      sampleCode,
+    ],
+    {
+      stdio: ['inherit', 'inherit', 'inherit', 'pipe'],
+      env: { NODE_ENV: 'production' },
+    },
+  );
 
-    let message: any;
-    let error: any;
+  if (result.status !== 0) {
+    throw new Error(`Benchmark failed with "${result.status}" status.`);
+  }
 
-    child.on('message', (msg) => (message = msg));
-    child.on('error', (e) => (error = e));
-    child.on('close', () => {
-      if (message) {
-        return resolve(message);
-      }
-      reject(error || new Error('Spawn process closed without error'));
-    });
-  });
+  const resultStr = result.output[3]?.toString();
+  assert(resultStr != null);
+  return JSON.parse(resultStr);
 }
 
-runBenchmarks().catch((error) => {
-  console.error(error);
-  process.exit(1);
-});
+runBenchmarks();
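
Outside the diff itself, here is a minimal standalone sketch of the result-passing technique this commit switches to: the child process writes its JSON result to file descriptor 3 (opened as an extra 'pipe' in spawnSync's stdio), and the parent reads it back synchronously from result.output[3]. This is only an illustration under the same Node.js APIs used above; the helper name runChildAndReadFd3 and the demo payload are invented and do not appear in the commit.

// fd3-sketch.ts (hypothetical file, not part of the commit)
import * as cp from 'node:child_process';
import assert from 'node:assert';

interface DemoResult {
  name: string;
  clocked: number;
}

function runChildAndReadFd3(): DemoResult {
  // Child code: compute something and write it to fd 3 instead of using IPC.
  const childCode = `
    import fs from 'node:fs';

    const result = { name: 'demo', clocked: 42 };
    fs.writeFileSync(3, JSON.stringify(result));
  `;

  const result = cp.spawnSync(
    process.execPath,
    ['--input-type=module', '--eval', childCode],
    // The fourth stdio entry becomes an extra pipe the child sees as fd 3.
    { stdio: ['inherit', 'inherit', 'inherit', 'pipe'] },
  );

  if (result.status !== 0) {
    throw new Error(`Child failed with "${result.status}" status.`);
  }

  // result.output is indexed by fd; entry 3 holds whatever the child wrote to fd 3.
  const resultStr = result.output[3]?.toString();
  assert(resultStr != null);
  return JSON.parse(resultStr);
}

console.log(runChildAndReadFd3()); // { name: 'demo', clocked: 42 }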