2 files changed, 18 insertions(+), 1 deletion(-)
First file:

```diff
@@ -7,7 +7,7 @@
 from array_api_tests import _array_module as xp
 from array_api_tests._array_module import _UndefinedStub
 
-from reporting import pytest_metadata, add_extra_json_metadata  # noqa
+from reporting import pytest_metadata, pytest_json_modifyreport, add_extra_json_metadata  # noqa
 
 settings.register_profile("xp_default", deadline=800)
 
```
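This import is what actually activates the new hook: pytest only calls hook implementations it finds in the namespace of a `conftest.py` or a registered plugin, so defining `pytest_json_modifyreport` in `reporting.py` alone would do nothing. A minimal sketch of the mechanism, assuming the pytest-json-report plugin (which defines this hook) is installed:

```python
# conftest.py -- minimal sketch. pytest scans the conftest module namespace
# for names starting with "pytest_", so re-exporting the hook here is what
# registers it; the noqa comment keeps linters from flagging the import as
# unused.
from reporting import pytest_json_modifyreport  # noqa: F401
```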
Second file:

```diff
@@ -2,6 +2,7 @@
 from array_api_tests import _array_module as xp
 from array_api_tests import __version__
 
+from collections import Counter
 from types import BuiltinFunctionType, FunctionType
 import dataclasses
 import json
@@ -89,3 +90,19 @@ def finalizer():
         add_metadata('hypothesis_statistics', request.node.hypothesis_statistics)
 
     request.addfinalizer(finalizer)
+
+def pytest_json_modifyreport(json_report):
+    # Deduplicate warnings. These duplicate warnings can cause the file size
+    # to become huge. For instance, a warning from np.bool which is emitted
+    # every time hypothesis runs (over a million times) causes the warnings
+    # JSON for a plain numpy namespace run to be over 500MB.
+
+    # This will lose information about what order the warnings were issued in,
+    # but that isn't particularly helpful anyway since the warning metadata
+    # doesn't store a full stack of where it was issued from. The resulting
+    # warnings will be in order of the first time each warning is issued, since
+    # collections.Counter is ordered just like dict().
+    counted_warnings = Counter([frozenset(i.items()) for i in json_report['warnings']])
+    deduped_warnings = [{**dict(i), 'count': counted_warnings[i]} for i in counted_warnings]
+
+    json_report['warnings'] = deduped_warnings
```
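The deduplication itself is plain Python: each warning dict is turned into a `frozenset` of its items so identical warnings hash to the same `Counter` key, then each unique warning is rebuilt as a dict with an added `count` field. A standalone sketch of the technique with hypothetical warning entries (the real entries come from the report's warnings list):

```python
from collections import Counter

# Hypothetical warning entries; the duplicates stand in for a warning that
# hypothesis re-triggers on every example.
warnings = [
    {'message': 'np.bool is deprecated', 'category': 'DeprecationWarning'},
    {'message': 'np.bool is deprecated', 'category': 'DeprecationWarning'},
    {'message': 'overflow encountered', 'category': 'RuntimeWarning'},
]

# frozenset(w.items()) is hashable, so identical dicts collapse to one key;
# Counter preserves first-seen order, like dict().
counted = Counter(frozenset(w.items()) for w in warnings)
deduped = [{**dict(k), 'count': counted[k]} for k in counted]

# deduped == [
#     {'message': 'np.bool is deprecated', 'category': 'DeprecationWarning', 'count': 2},
#     {'message': 'overflow encountered', 'category': 'RuntimeWarning', 'count': 1},
# ]
```

Note this assumes every value in a warning entry is hashable, which holds for the strings and ints that warning metadata contains.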