diff --git a/.gitignore b/.gitignore
index b6e47617..49d7dca5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,3 +127,6 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+# pytest-json-report
+.report.json
diff --git a/array_api_tests/stubs.py b/array_api_tests/stubs.py
index 35cc885f..3fba33fc 100644
--- a/array_api_tests/stubs.py
+++ b/array_api_tests/stubs.py
@@ -9,6 +9,7 @@
 __all__ = [
     "name_to_func",
     "array_methods",
+    "array_attributes",
     "category_to_funcs",
     "EXTENSIONS",
     "extension_to_funcs",
@@ -34,6 +35,10 @@
     f for n, f in inspect.getmembers(array, predicate=inspect.isfunction)
     if n != "__init__"  # probably exists for Sphinx
 ]
+array_attributes = [
+    n for n, f in inspect.getmembers(array, predicate=lambda x: not inspect.isfunction(x))
+    if n != "__init__"  # probably exists for Sphinx
+]
 
 category_to_funcs: Dict[str, List[FunctionType]] = {}
 for name, mod in name_to_mod.items():
diff --git a/array_api_tests/test_has_names.py b/array_api_tests/test_has_names.py
new file mode 100644
index 00000000..d9194d82
--- /dev/null
+++ b/array_api_tests/test_has_names.py
@@ -0,0 +1,37 @@
+"""
+This is a very basic test to see what names are defined in a library. It
+does not even require functioning hypothesis array_api support.
+"""
+
+import pytest
+
+from ._array_module import mod as xp, mod_name
+from .stubs import (array_attributes, array_methods, category_to_funcs,
+                    extension_to_funcs, EXTENSIONS)
+
+has_name_params = []
+for ext, stubs in extension_to_funcs.items():
+    for stub in stubs:
+        has_name_params.append(pytest.param(ext, stub.__name__))
+for cat, stubs in category_to_funcs.items():
+    for stub in stubs:
+        has_name_params.append(pytest.param(cat, stub.__name__))
+for meth in array_methods:
+    has_name_params.append(pytest.param('array_method', meth.__name__))
+for attr in array_attributes:
+    has_name_params.append(pytest.param('array_attribute', attr))
+
+@pytest.mark.parametrize("category, name", has_name_params)
+def test_has_names(category, name):
+    if category in EXTENSIONS:
+        ext_mod = getattr(xp, category)
+        assert hasattr(ext_mod, name), f"{mod_name} is missing the {category} extension function {name}()"
+    elif category.startswith('array_'):
+        # TODO: This would fail if ones() is missing.
+        arr = xp.ones((1, 1))
+        if category == 'array_attribute':
+            assert hasattr(arr, name), f"The {mod_name} array object is missing the attribute {name}"
+        else:
+            assert hasattr(arr, name), f"The {mod_name} array object is missing the method {name}()"
+    else:
+        assert hasattr(xp, name), f"{mod_name} is missing the {category} function {name}()"
diff --git a/conftest.py b/conftest.py
index 9fec536b..e0453e40 100644
--- a/conftest.py
+++ b/conftest.py
@@ -7,8 +7,9 @@
 from array_api_tests import _array_module as xp
 from array_api_tests._array_module import _UndefinedStub
 
-settings.register_profile("xp_default", deadline=800)
+from reporting import pytest_metadata, pytest_json_modifyreport, add_extra_json_metadata # noqa
 
+settings.register_profile("xp_default", deadline=800)
 
 def pytest_addoption(parser):
     # Hypothesis max examples
@@ -120,7 +121,7 @@ def pytest_collection_modifyitems(config, items):
                         mark.skip(reason="disabled via --disable-data-dependent-shapes")
                     )
                     break
-        # skip if test not appropiate for CI
+        # skip if test not appropriate for CI
         if ci:
             ci_mark = next((m for m in markers if m.name == "ci"), None)
             if ci_mark is None:
diff --git a/reporting.py b/reporting.py
new file mode 100644
index 00000000..b15b3364
--- /dev/null
+++ b/reporting.py
@@ -0,0 +1,108 @@
+from array_api_tests.dtype_helpers import dtype_to_name
+from array_api_tests import _array_module as xp
+from array_api_tests import __version__
+
+from collections import Counter
+from types import BuiltinFunctionType, FunctionType
+import dataclasses
+import json
+import warnings
+
+from hypothesis.strategies import SearchStrategy
+
+from pytest import mark, fixture
+try:
+    import pytest_jsonreport # noqa
+except ImportError:
+    raise ImportError("pytest-json-report is required to run the array API tests")
+
+def to_json_serializable(o):
+    if o in dtype_to_name:
+        return dtype_to_name[o]
+    if isinstance(o, (BuiltinFunctionType, FunctionType, type)):
+        return o.__name__
+    if dataclasses.is_dataclass(o):
+        return to_json_serializable(dataclasses.asdict(o))
+    if isinstance(o, SearchStrategy):
+        return repr(o)
+    if isinstance(o, dict):
+        return {to_json_serializable(k): to_json_serializable(v) for k, v in o.items()}
+    if isinstance(o, tuple):
+        if hasattr(o, '_asdict'): # namedtuple
+            return to_json_serializable(o._asdict())
+        return tuple(to_json_serializable(i) for i in o)
+    if isinstance(o, list):
+        return [to_json_serializable(i) for i in o]
+
+    # Ensure everything is JSON serializable. If this warning is issued, it
+    # means the given type needs to be added above if possible.
+    try:
+        json.dumps(o)
+    except TypeError:
+        warnings.warn(f"{o!r} (of type {type(o)}) is not JSON-serializable. Using the repr instead.")
+        return repr(o)
+
+    return o
+
+@mark.optionalhook
+def pytest_metadata(metadata):
+    """
+    Additional global metadata for --json-report.
+    """
+    metadata['array_api_tests_module'] = xp.mod_name
+    metadata['array_api_tests_version'] = __version__
+
+@fixture(autouse=True)
+def add_extra_json_metadata(request, json_metadata):
+    """
+    Additional per-test metadata for --json-report
+    """
+    def add_metadata(name, obj):
+        obj = to_json_serializable(obj)
+        json_metadata[name] = obj
+
+    test_module = request.module.__name__
+    if test_module.startswith('array_api_tests.meta'):
+        return
+
+    test_function = request.function.__name__
+    assert test_function.startswith('test_'), 'unexpected test function name'
+
+    if test_module == 'array_api_tests.test_has_names':
+        array_api_function_name = None
+    else:
+        array_api_function_name = test_function[len('test_'):]
+
+    add_metadata('test_module', test_module)
+    add_metadata('test_function', test_function)
+    add_metadata('array_api_function_name', array_api_function_name)
+
+    if hasattr(request.node, 'callspec'):
+        params = request.node.callspec.params
+        add_metadata('params', params)
+
+    def finalizer():
+        # TODO: This metadata is all in the form of error strings. It might be
+        # nice to extract the hypothesis failing inputs directly somehow.
+        if hasattr(request.node, 'hypothesis_report_information'):
+            add_metadata('hypothesis_report_information', request.node.hypothesis_report_information)
+        if hasattr(request.node, 'hypothesis_statistics'):
+            add_metadata('hypothesis_statistics', request.node.hypothesis_statistics)
+
+    request.addfinalizer(finalizer)
+
+def pytest_json_modifyreport(json_report):
+    # Deduplicate warnings. These duplicate warnings can cause the file size
+    # to become huge. For instance, a warning from np.bool which is emitted
+    # every time hypothesis runs (over a million times) causes the warnings
+    # JSON for a plain numpy namespace run to be over 500MB.
+
+    # This will lose information about what order the warnings were issued in,
+    # but that isn't particularly helpful anyway since the warning metadata
+    # doesn't store a full stack of where it was issued from. The resulting
+    # warnings will be in order of the first time each warning is issued since
+    # collections.Counter is ordered just like dict().
+    counted_warnings = Counter([frozenset(i.items()) for i in json_report['warnings']])
+    deduped_warnings = [{**dict(i), 'count': counted_warnings[i]} for i in counted_warnings]
+
+    json_report['warnings'] = deduped_warnings
diff --git a/requirements.txt b/requirements.txt
index b3b26223..de07fcc5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
 pytest
+pytest-json-report
 hypothesis>=6.45.0
 ndindex>=1.6
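Usage note: the hooks above only attach metadata; pytest-json-report writes the actual file. The sketch below (not part of the diff) shows how the resulting report might be consumed, assuming pytest-json-report's layout as I understand it: session metadata from pytest_metadata() lands under the top-level "environment" key, per-test metadata from the json_metadata fixture lands under each test's "metadata" key, and .report.json is the plugin's default output path.

# Sketch: read back the report produced by a run such as
#   ARRAY_API_TESTS_MODULE=numpy.array_api pytest array_api_tests/ --json-report --json-report-file=.report.json
import json
from collections import Counter

with open(".report.json") as f:
    report = json.load(f)

# Session-level metadata added by pytest_metadata() in reporting.py
print(report.get("environment", {}).get("array_api_tests_module"))
print(report.get("environment", {}).get("array_api_tests_version"))

# Tally outcomes per array API function, using the per-test metadata
# attached by the add_extra_json_metadata fixture
outcomes = Counter(
    (t.get("metadata", {}).get("array_api_function_name"), t["outcome"])
    for t in report.get("tests", [])
)
for (func, outcome), count in outcomes.most_common(10):
    print(f"{func}: {outcome} x{count}")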