From f49f42b2865fb7f4ae0b01ab6ab21c4f6786a88b Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 6 Jan 2023 17:59:37 -0700 Subject: [PATCH 01/90] Start pytorch compatibility layer --- array_api_compat/torch/__init__.py | 14 +++++++++++++ array_api_compat/torch/_aliases.py | 33 ++++++++++++++++++++++++++++++ array_api_compat/torch/linalg.py | 1 + 3 files changed, 48 insertions(+) create mode 100644 array_api_compat/torch/__init__.py create mode 100644 array_api_compat/torch/_aliases.py create mode 100644 array_api_compat/torch/linalg.py diff --git a/array_api_compat/torch/__init__.py b/array_api_compat/torch/__init__.py new file mode 100644 index 00000000..bdbc6b39 --- /dev/null +++ b/array_api_compat/torch/__init__.py @@ -0,0 +1,14 @@ +from torch import * + +# Several names are not included in the above import * +import torch +for n in dir(torch): + if not n.startswith('_'): + exec(n + ' = torch.' + n) + +# These imports may overwrite names from the import * above. +from ._aliases import * + +from ..common._helpers import * + +__array_api_version__ = '2021.12' diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py new file mode 100644 index 00000000..6195fb30 --- /dev/null +++ b/array_api_compat/torch/_aliases.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional, Tuple, Union + from ..common._typing import Device, Dtype + +import torch +array = torch.Tensor + +# Basic renames +permute_dims = torch.permute + +def expand_dims(x: array, /, *, axis: int = 0) -> array: + if axis > x.ndim or axis < -x.ndim - 1: + raise IndexError("axis must be in the range [-x.ndim-1, x.ndim]") + if axis < 0: + axis = x.ndim + axis + 1 + slices = (slice(None),)*axis + return x[slices + (None,)] + +def full(shape: Union[int, Tuple[int, ...]], + fill_value: Union[bool, int, float, complex], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if isinstance(shape, int): + shape = (shape,) + + return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) + +__all__ = ['permute_dims', 'expand_dims', 'full'] diff --git a/array_api_compat/torch/linalg.py b/array_api_compat/torch/linalg.py new file mode 100644 index 00000000..8d223fd4 --- /dev/null +++ b/array_api_compat/torch/linalg.py @@ -0,0 +1 @@ +raise ImportError("The array api compat torch.linalg module extension is not yet implemented") From 4b957489ab7241221a6fe28fcaa7bce84e5b71a3 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 6 Jan 2023 18:08:49 -0700 Subject: [PATCH 02/90] Add vendor tests for torch --- tests/test_vendoring.py | 4 ++++ vendor_test/uses_torch.py | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 vendor_test/uses_torch.py diff --git a/tests/test_vendoring.py b/tests/test_vendoring.py index 85f68626..93a961aa 100644 --- a/tests/test_vendoring.py +++ b/tests/test_vendoring.py @@ -13,3 +13,7 @@ def test_vendoring_cupy(): from vendor_test import uses_cupy uses_cupy._test_cupy() + +def test_vendoring_torch(): + from vendor_test import uses_torch + uses_torch._test_torch() diff --git a/vendor_test/uses_torch.py b/vendor_test/uses_torch.py new file mode 100644 index 00000000..b828ad33 --- /dev/null +++ b/vendor_test/uses_torch.py @@ -0,0 +1,22 @@ +# Basic test that vendoring works + +from .vendored._compat import torch as torch_compat + +import torch + +def _test_torch(): + a = torch_compat.asarray([1., 2., 3.]) + b = 
torch_compat.arange(3, dtype=torch_compat.float64) + assert a.dtype == torch_compat.float32 == torch.float32 + assert b.dtype == torch_compat.float64 == torch.float64 + + # torch.expand_dims does not exist. Update this to use something else if it is added + res = torch_compat.expand_dims(a, axis=0) + assert res.dtype == torch_compat.float32 == torch.float32 + assert res.shape == (1, 3) + assert isinstance(res.shape, torch.Size) + assert isinstance(a, torch.Tensor) + assert isinstance(b, torch.Tensor) + assert isinstance(res, torch.Tensor) + + torch.testing.assert_allclose(res, [[1., 2., 3.]]) From 1ecb7cac2457342d7bea13345cdbefbf35edc11d Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 9 Jan 2023 14:11:22 -0700 Subject: [PATCH 03/90] Replace torch expand_dims wrapper to a wrapper around unsqueeze --- array_api_compat/torch/_aliases.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 6195fb30..cb5273de 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -12,12 +12,7 @@ permute_dims = torch.permute def expand_dims(x: array, /, *, axis: int = 0) -> array: - if axis > x.ndim or axis < -x.ndim - 1: - raise IndexError("axis must be in the range [-x.ndim-1, x.ndim]") - if axis < 0: - axis = x.ndim + axis + 1 - slices = (slice(None),)*axis - return x[slices + (None,)] + return torch.unsqueeze(x, axis) def full(shape: Union[int, Tuple[int, ...]], fill_value: Union[bool, int, float, complex], From 52b60549e6bebcc19f41d06a89279d0f8c1d304e Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 9 Jan 2023 15:30:56 -0700 Subject: [PATCH 04/90] Add torch support to the helper functions --- array_api_compat/common/_helpers.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/array_api_compat/common/_helpers.py b/array_api_compat/common/_helpers.py index a1310b1c..6b2a2734 100644 --- a/array_api_compat/common/_helpers.py +++ b/array_api_compat/common/_helpers.py @@ -29,11 +29,24 @@ def _is_cupy_array(x): # TODO: Should we reject ndarray subclasses? return isinstance(x, (cp.ndarray, cp.generic)) +def _is_torch_array(x): + # Avoid importing torch if it isn't already + if 'torch' not in sys.modules: + return False + + import torch + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, torch.Tensor) + def is_array_api_obj(x): """ Check if x is an array API compatible array object. """ - return _is_numpy_array(x) or _is_cupy_array(x) or hasattr(x, '__array_namespace__') + return _is_numpy_array(x) \ + or _is_cupy_array(x) \ + or _is_torch_array(x) \ + or hasattr(x, '__array_namespace__') def get_namespace(*xs, _use_compat=True): """ @@ -139,6 +152,11 @@ def _cupy_to_device(x, device, /, stream=None): prev_stream.use() return arr +def _torch_to_device(x, device, /, stream=None): + if stream is not None: + raise NotImplementedError + return x.to(device) + def to_device(x: "Array", device: "Device", /, *, stream: Optional[Union[int, Any]] = None) -> "Array": """ Copy the array from the device on which it currently resides to the specified ``device``. 
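A usage sketch of the torch branch added above (illustrative only: the
private helper simply defers to Tensor.to, and any non-None stream raises
NotImplementedError):

    import torch
    from array_api_compat.common._helpers import _torch_to_device

    x = torch.arange(3)             # a CPU tensor
    y = _torch_to_device(x, "cpu")  # equivalent to x.to("cpu")
    assert y.device.type == "cpu"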
@@ -169,7 +187,8 @@ def to_device(x: "Array", device: "Device", /, *, stream: Optional[Union[int, An elif _is_cupy_array(x): # cupy does not yet have to_device return _cupy_to_device(x, device, stream=stream) - + elif _is_torch_array(x): + return _torch_to_device(x) return x.to_device(device, stream=stream) __all__ = ['is_array_api_obj', 'get_namespace', 'device', 'to_device'] From c484dbff2acc576a4362c95a06afd91507cbcbb2 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 9 Jan 2023 15:31:42 -0700 Subject: [PATCH 05/90] Add max and min wrappers for torch --- array_api_compat/torch/_aliases.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index cb5273de..657864fc 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -11,6 +11,20 @@ # Basic renames permute_dims = torch.permute +# These wrappers are mostly based on the fact that pytorch uses 'dim' instead +# of 'axis'. +def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return x + return torch.amax(x, axis, keepdims=keepdims) + +def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return x + return torch.amin(x, axis, keepdims=keepdims) + def expand_dims(x: array, /, *, axis: int = 0) -> array: return torch.unsqueeze(x, axis) @@ -25,4 +39,4 @@ def full(shape: Union[int, Tuple[int, ...]], return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) -__all__ = ['permute_dims', 'expand_dims', 'full'] +__all__ = ['permute_dims', 'max', 'min', 'expand_dims', 'full'] From 3023a8943de8b316fdc955bce948cd40026485ec Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 9 Jan 2023 15:48:38 -0700 Subject: [PATCH 06/90] Add a wrapper for torch.prod --- array_api_compat/torch/_aliases.py | 38 ++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 657864fc..94412126 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -25,6 +25,44 @@ def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep return x return torch.amin(x, axis, keepdims=keepdims) +def _normalize_axes(axis, ndim): + axes = [] + lower, upper = -ndim, ndim - 1 + for a in axis: + if a < lower or a > upper: + # Match torch error message (e.g., from sum()) + raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a}") + if a < 0: + a = a + ndim + if a in axes: + # Match torch error message but use IndexError instead of RuntimeError + raise IndexError(f"dim {a} appears multiple times in the list of dims") + axes.append(a) + return sorted(axes) + +def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype: Optional[Dtype] = None, keepdims: bool = False) -> array: + # torch.prod doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
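+    # Example: axis=(0, 2) on a 3-D tensor is reduced one dimension at a
+    # time. With keepdims=True the dim positions stay stable between
+    # passes; without it each completed reduction shifts the later dims
+    # down by one, hence the `a - i` in the second branch.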
+ if isinstance(axis, tuple): + axes = _normalize_axes(axis, x.ndim) + if keepdims: + for a in axes: + x = torch.prod(x, a, dtype=dtype, keepdims=keepdims) + return x + else: + for i, a in enumerate(axes): + x = torch.prod(x, a - i, dtype=dtype, keepdims=keepdims) + return x + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.prod(x, dtype=dtype) + if keepdims: + res = res[(None,)*x.ndim] + return res + + return torch.prod(x, axis, dtype=dtype, keepdims=keepdims) + def expand_dims(x: array, /, *, axis: int = 0) -> array: return torch.unsqueeze(x, axis) From a1bbd9bd1e766ffb5c07989dc1a1a30df23eee11 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 9 Jan 2023 15:48:50 -0700 Subject: [PATCH 07/90] Add the torch prod wrapper to __all__ --- array_api_compat/torch/_aliases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 94412126..da69cb23 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -77,4 +77,4 @@ def full(shape: Union[int, Tuple[int, ...]], return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) -__all__ = ['permute_dims', 'max', 'min', 'expand_dims', 'full'] +__all__ = ['permute_dims', 'max', 'min', 'prod', 'expand_dims', 'full'] From dba7fa7320454a1911f76056a732b0b4b509ac57 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 10 Jan 2023 13:32:17 -0700 Subject: [PATCH 08/90] Return a copy from max and min with axis=() --- array_api_compat/torch/_aliases.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index da69cb23..3e586b73 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -16,13 +16,13 @@ def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # https://github.com/pytorch/pytorch/issues/29137 if axis == (): - return x + return torch.clone(x) return torch.amax(x, axis, keepdims=keepdims) def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # https://github.com/pytorch/pytorch/issues/29137 if axis == (): - return x + return torch.clone(x) return torch.amin(x, axis, keepdims=keepdims) def _normalize_axes(axis, ndim): From 44d91e1bf8664ba99a9634f87e6fa2228ca593c8 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 10 Jan 2023 13:41:13 -0700 Subject: [PATCH 09/90] Add a size() helper function --- README.md | 7 +++++++ array_api_compat/common/_helpers.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a49fcb5b..6814d992 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,13 @@ the array API: [Stream](https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html) objects. +- `size(x)`: Equivalent to + [`x.size`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html#array_api.array.size), + i.e., the number of elements in the array. Included because PyTorch's + `Tensor` defines `size` as a method which returns the shape, and this cannot + be wrapped because this compat library doesn't wrap or extend the array + objects. 
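+
+  For example (illustrative, assuming `size` is importable from the
+  top-level `array_api_compat` namespace like the other helpers):
+
+  ```py
+  import torch
+  from array_api_compat import size
+
+  x = torch.ones((2, 3))
+  x.size()  # torch.Size([2, 3]): torch's size() returns the shape
+  size(x)   # 6: the array API's x.size, i.e., the element count
+  ```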
+ ## Known Differences from the Array API Specification There are some known differences between this library and the array API diff --git a/array_api_compat/common/_helpers.py b/array_api_compat/common/_helpers.py index 6b2a2734..69150b69 100644 --- a/array_api_compat/common/_helpers.py +++ b/array_api_compat/common/_helpers.py @@ -8,6 +8,7 @@ from __future__ import annotations import sys +import math def _is_numpy_array(x): # Avoid importing NumPy if it isn't already @@ -191,4 +192,12 @@ def to_device(x: "Array", device: "Device", /, *, stream: Optional[Union[int, An return _torch_to_device(x) return x.to_device(device, stream=stream) -__all__ = ['is_array_api_obj', 'get_namespace', 'device', 'to_device'] +def size(x): + """ + Return the total number of elements of x + """ + if None in x.shape: + return None + return math.prod(x.shape) + +__all__ = ['is_array_api_obj', 'get_namespace', 'device', 'to_device', 'size'] From 1faea7ba8e4b8e410816ac91fe4d58bafab75b19 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 10 Jan 2023 14:33:24 -0700 Subject: [PATCH 10/90] Add any and all torch wrappers and fix some issues with prod --- array_api_compat/torch/_aliases.py | 79 +++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 11 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 3e586b73..1260abb3 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -13,6 +13,8 @@ # These wrappers are mostly based on the fact that pytorch uses 'dim' instead # of 'axis'. + +# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745 def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # https://github.com/pytorch/pytorch/issues/29137 if axis == (): @@ -40,29 +42,84 @@ def _normalize_axes(axis, ndim): axes.append(a) return sorted(axes) + +def _apply_keepdims(x, ndim, keepdims): + if keepdims: + return x[(None,)*ndim] + return x + def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype: Optional[Dtype] = None, keepdims: bool = False) -> array: # torch.prod doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). + ndim = x.ndim if isinstance(axis, tuple): axes = _normalize_axes(axis, x.ndim) - if keepdims: - for a in axes: - x = torch.prod(x, a, dtype=dtype, keepdims=keepdims) - return x - else: - for i, a in enumerate(axes): - x = torch.prod(x, a - i, dtype=dtype, keepdims=keepdims) - return x + for i, a in enumerate(axes): + if keepdims: + x = torch.prod(x, a, dtype=dtype) + x = torch.unsqueeze(x, a) + else: + x = torch.prod(x, a - i, dtype=dtype) + return x if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.prod(x, dtype=dtype) - if keepdims: - res = res[(None,)*x.ndim] + res = _apply_keepdims(res, ndim, keepdims) return res return torch.prod(x, axis, dtype=dtype, keepdims=keepdims) +def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # torch.any doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
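+    # Capture ndim before reducing: the axis=None branch collapses x to a
+    # 0-D result, and _apply_keepdims needs the original rank to rebuild
+    # the keepdims shape.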
+ ndim = x.ndim + if axis == (): + return x.to(torch.bool) + if isinstance(axis, tuple): + axes = _normalize_axes(axis, x.ndim) + for i, a in enumerate(axes): + if keepdims: + x = torch.any(x, a) + x = torch.unsqueeze(x, a) + else: + x = torch.any(x, a - i) + return x.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.any(x) + res = _apply_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.any doesn't return bool for uint8 + return torch.any(x, axis, keepdims=keepdims).to(torch.bool) + +def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # torch.all doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + if isinstance(axis, tuple): + axes = _normalize_axes(axis, x.ndim) + for i, a in enumerate(axes): + if keepdims: + x = torch.all(x, a) + x = torch.unsqueeze(x, a) + else: + x = torch.all(x, a - i) + return x.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.all(x) + res = _apply_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.all doesn't return bool for uint8 + return torch.all(x, axis, keepdims=keepdims).to(torch.bool) + def expand_dims(x: array, /, *, axis: int = 0) -> array: return torch.unsqueeze(x, axis) @@ -77,4 +134,4 @@ def full(shape: Union[int, Tuple[int, ...]], return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) -__all__ = ['permute_dims', 'max', 'min', 'prod', 'expand_dims', 'full'] +__all__ = ['permute_dims', 'max', 'min', 'prod', 'any', 'all', 'expand_dims', 'full'] From ad4484dc57f2ebf5f558e4245fbf0ca544720bd7 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 10 Jan 2023 18:30:31 -0700 Subject: [PATCH 11/90] Add astype torch wrapper --- array_api_compat/torch/_aliases.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 1260abb3..0e9eab6f 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -134,4 +134,8 @@ def full(shape: Union[int, Tuple[int, ...]], return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) -__all__ = ['permute_dims', 'max', 'min', 'prod', 'any', 'all', 'expand_dims', 'full'] +def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: + return x.to(dtype, copy=copy) + +__all__ = ['permute_dims', 'max', 'min', 'prod', 'any', 'all', 'expand_dims', + 'full', 'astype'] From db3241da9e4926227d2d9f9f5e4ab4669e5fdbc7 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 10 Jan 2023 18:35:14 -0700 Subject: [PATCH 12/90] Cast the input to prod/all/any to tensor --- array_api_compat/torch/_aliases.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 0e9eab6f..f83d6565 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -51,6 +51,7 @@ def _apply_keepdims(x, ndim, keepdims): def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype: Optional[Dtype] = None, keepdims: bool = False) -> array: # torch.prod doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). 
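+    # torch.asarray also accepts Python scalars and sequences, so this
+    # makes prod() tolerant of non-Tensor input.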
+ x = torch.asarray(x) ndim = x.ndim if isinstance(axis, tuple): axes = _normalize_axes(axis, x.ndim) @@ -73,6 +74,7 @@ def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dty def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # torch.any doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). + x = torch.asarray(x) ndim = x.ndim if axis == (): return x.to(torch.bool) @@ -98,6 +100,7 @@ def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # torch.all doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). + x = torch.asarray(x) ndim = x.ndim if axis == (): return x.to(torch.bool) From 3f0d91397637bb43fca0ce4a93f25c9819b4d085 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 10 Jan 2023 18:35:43 -0700 Subject: [PATCH 13/90] More logical order for some functions --- array_api_compat/torch/_aliases.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index f83d6565..20075598 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -123,9 +123,9 @@ def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep # torch.all doesn't return bool for uint8 return torch.all(x, axis, keepdims=keepdims).to(torch.bool) -def expand_dims(x: array, /, *, axis: int = 0) -> array: - return torch.unsqueeze(x, axis) +# torch.full does not accept an int size +# https://github.com/pytorch/pytorch/issues/70906 def full(shape: Union[int, Tuple[int, ...]], fill_value: Union[bool, int, float, complex], *, @@ -137,8 +137,12 @@ def full(shape: Union[int, Tuple[int, ...]], return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) +# Functions that aren't in torch https://github.com/pytorch/pytorch/issues/58742 +def expand_dims(x: array, /, *, axis: int = 0) -> array: + return torch.unsqueeze(x, axis) + def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: return x.to(dtype, copy=copy) -__all__ = ['permute_dims', 'max', 'min', 'prod', 'any', 'all', 'expand_dims', - 'full', 'astype'] +__all__ = ['permute_dims', 'max', 'min', 'prod', 'any', 'all', 'full', + 'expand_dims', 'astype'] From 2d25dd26e85dca045823557ad35674f44ae69ec4 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 23 Jan 2023 21:19:21 -0700 Subject: [PATCH 14/90] Add wrappers for two-argument elementwise functions PyTorch doesn't do correct type promotion when one of the arguments is a scalar. We unfortunately can't do anything about this for the actual operators. 
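For example, with native torch:

    >>> import torch
    >>> x = torch.tensor([1, 2], dtype=torch.int8)
    >>> torch.add(x, torch.tensor(1, dtype=torch.int64)).dtype
    torch.int8

whereas the array API requires int8 + int64 -> int64. The wrapped add()
defined below promotes across the 0-D argument first:

    >>> from array_api_compat.torch import add
    >>> add(x, torch.tensor(1, dtype=torch.int64)).dtype
    torch.int64

(Illustrative session; the native result follows torch's documented
promotion rules for zero-dimensional tensors.)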
--- array_api_compat/torch/_aliases.py | 136 ++++++++++++++++++++++++++++- 1 file changed, 134 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 20075598..13222e26 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -1,5 +1,7 @@ from __future__ import annotations +from functools import wraps + from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional, Tuple, Union @@ -8,9 +10,135 @@ import torch array = torch.Tensor +_array_api_dtypes = { + torch.bool, + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.float32, + torch.float64, +} + +_promotion_table = { + # bool + (torch.bool, torch.bool): torch.bool, + # ints + (torch.int8, torch.int8): torch.int8, + (torch.int8, torch.int16): torch.int16, + (torch.int8, torch.int32): torch.int32, + (torch.int8, torch.int64): torch.int64, + (torch.int16, torch.int8): torch.int16, + (torch.int16, torch.int16): torch.int16, + (torch.int16, torch.int32): torch.int32, + (torch.int16, torch.int64): torch.int64, + (torch.int32, torch.int8): torch.int32, + (torch.int32, torch.int16): torch.int32, + (torch.int32, torch.int32): torch.int32, + (torch.int32, torch.int64): torch.int64, + (torch.int64, torch.int8): torch.int64, + (torch.int64, torch.int16): torch.int64, + (torch.int64, torch.int32): torch.int64, + (torch.int64, torch.int64): torch.int64, + # uints + (torch.uint8, torch.uint8): torch.uint8, + # ints and uints (mixed sign) + (torch.int8, torch.uint8): torch.int16, + (torch.int16, torch.uint8): torch.int16, + (torch.int32, torch.uint8): torch.int32, + (torch.int64, torch.uint8): torch.int64, + (torch.uint8, torch.int8): torch.int16, + (torch.uint8, torch.int16): torch.int16, + (torch.uint8, torch.int32): torch.int32, + (torch.uint8, torch.int64): torch.int64, + # floats + (torch.float32, torch.float32): torch.float32, + (torch.float32, torch.float64): torch.float64, + (torch.float64, torch.float32): torch.float64, + (torch.float64, torch.float64): torch.float64, +} + + +def _two_arg(f): + @wraps(f) + def _f(x1, x2, /, **kwargs): + x1, x2 = _fix_promotion(x1, x2) + return f(x1, x2, **kwargs) + if _f.__doc__ is None: + _f.__doc__ = f"""\ +Array API compatibility wrapper for torch.{f.__name__}. + +See the corresponding PyTorch documentation and/or the array API specification +for more details. + +""" + return _f + +def _fix_promotion(x1, x2): + if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes: + return x1, x2 + # If an argument is 0-D pytorch downcasts the other argument + if x1.shape == (): + dtype = result_type(x1, x2) + x2 = x2.to(dtype) + if x2.shape == (): + dtype = result_type(x1, x2) + x1 = x1.to(dtype) + return x1, x2 + +def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: + if len(arrays_and_dtypes) == 0: + raise TypeError("At least one array or dtype must be provided") + if len(arrays_and_dtypes) == 1: + x = arrays_and_dtypes[0] + if isinstance(x, torch.dtype): + return x + return x.dtype + if len(arrays_and_dtypes) > 2: + return result_type(arrays_and_dtypes[0], result_type(*arrays_and_dtypes[1:])) + + x, y = arrays_and_dtypes + xdt = x.dtype if not isinstance(x, torch.dtype) else x + ydt = y.dtype if not isinstance(y, torch.dtype) else y + + if (xdt, ydt) in _promotion_table: + return _promotion_table[xdt, ydt] + + # This doesn't result_type(dtype, dtype) for non-array API dtypes + # because torch.result_type only accepts tensors. 
This does however, allow + # cross-kind promotion. + return torch.result_type(x, y) + # Basic renames permute_dims = torch.permute +# Two-arg elementwise functions +# These require a wrapper to do the correct type promotion on 0-D tensors +add = _two_arg(torch.add) +atan2 = _two_arg(torch.atan2) +bitwise_and = _two_arg(torch.bitwise_and) +bitwise_left_shift = _two_arg(torch.bitwise_left_shift) +bitwise_or = _two_arg(torch.bitwise_or) +bitwise_right_shift = _two_arg(torch.bitwise_right_shift) +bitwise_xor = _two_arg(torch.bitwise_xor) +divide = _two_arg(torch.divide) +# Also a rename. torch.equal does not broadcast +equal = _two_arg(torch.eq) +floor_divide = _two_arg(torch.floor_divide) +greater = _two_arg(torch.greater) +greater_equal = _two_arg(torch.greater_equal) +less = _two_arg(torch.less) +less_equal = _two_arg(torch.less_equal) +logaddexp = _two_arg(torch.logaddexp) +# logical functions are not included here because they only accept bool in the +# spec, so type promotion is irrelevant. +multiply = _two_arg(torch.multiply) +not_equal = _two_arg(torch.not_equal) +pow = _two_arg(torch.pow) +remainder = _two_arg(torch.remainder) +subtract = _two_arg(torch.subtract) + # These wrappers are mostly based on the fact that pytorch uses 'dim' instead # of 'axis'. @@ -144,5 +272,9 @@ def expand_dims(x: array, /, *, axis: int = 0) -> array: def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: return x.to(dtype, copy=copy) -__all__ = ['permute_dims', 'max', 'min', 'prod', 'any', 'all', 'full', - 'expand_dims', 'astype'] +__all__ = ['result_type', 'permute_dims', 'add', 'atan2', 'bitwise_and', + 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', + 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', + 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', + 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', + 'any', 'all', 'full', 'expand_dims', 'astype'] From c4c0cfa7f70a044655fe61504f66ad11a34ea534 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 11:25:04 -0700 Subject: [PATCH 15/90] Add bitwise_invert to torch --- array_api_compat/torch/_aliases.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 13222e26..0fefd596 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -112,6 +112,7 @@ def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: # Basic renames permute_dims = torch.permute +bitwise_invert = torch.bitwise_not # Two-arg elementwise functions # These require a wrapper to do the correct type promotion on 0-D tensors @@ -272,9 +273,10 @@ def expand_dims(x: array, /, *, axis: int = 0) -> array: def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: return x.to(dtype, copy=copy) -__all__ = ['result_type', 'permute_dims', 'add', 'atan2', 'bitwise_and', - 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', - 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', - 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', - 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', - 'any', 'all', 'full', 'expand_dims', 'astype'] +__all__ = ['result_type', 'permute_dims', 'bitwise_invert', 'add', 'atan2', + 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', + 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', + 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', + 'logaddexp', 'multiply', 'not_equal', 
'pow', 'remainder', + 'subtract', 'max', 'min', 'prod', 'any', 'all', 'full', + 'expand_dims', 'astype'] From a1917f8a20938a640f49d9fdf8c4ab02128b426f Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 11:58:07 -0700 Subject: [PATCH 16/90] Add torch wrappers for broadcast_to and can_cast --- array_api_compat/torch/_aliases.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 0fefd596..1cd75b84 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -4,8 +4,9 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Tuple, Union - from ..common._typing import Device, Dtype + from typing import List, Optional, Tuple, Union + from ..common._typing import Device + from torch import dtype as Dtype import torch array = torch.Tensor @@ -110,6 +111,11 @@ def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: # cross-kind promotion. return torch.result_type(x, y) +def can_cast(from_: Union[dtype, array], to: Dtype, /) -> bool: + if not isinstance(from_, torch.dtype): + from_ = from_.dtype + return torch.can_cast(from_, to) + # Basic renames permute_dims = torch.permute bitwise_invert = torch.bitwise_not @@ -273,10 +279,14 @@ def expand_dims(x: array, /, *, axis: int = 0) -> array: def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: return x.to(dtype, copy=copy) -__all__ = ['result_type', 'permute_dims', 'bitwise_invert', 'add', 'atan2', - 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', +def broadcast_arrays(*arrays: array) -> List[array]: + shape = torch.broadcast_shapes(*[a.shape for a in arrays]) + return [torch.broadcast_to(a, shape) for a in arrays] + +__all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'add', + 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', 'any', 'all', 'full', - 'expand_dims', 'astype'] + 'expand_dims', 'astype', 'broadcast_arrays'] From ae28ce0e4b471358a36e6fa4e1c44575772f1521 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 12:26:09 -0700 Subject: [PATCH 17/90] Add torch arange wrapper --- array_api_compat/torch/_aliases.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 1cd75b84..24744e28 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -1,6 +1,7 @@ from __future__ import annotations from functools import wraps +from builtins import all as builtin_all from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -258,6 +259,28 @@ def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep # torch.all doesn't return bool for uint8 return torch.all(x, axis, keepdims=keepdims).to(torch.bool) +# torch.arange doesn't support returning empty arrays +# (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some +# keyword argument combinations +# (https://github.com/pytorch/pytorch/issues/70914) +def arange(start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: 
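+    # Match the spec's one-argument form: arange(stop) means arange(0, stop).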
+ if stop is None: + start, stop = 0, start + if step > 0 and stop <= start or step < 0 and stop >= start: + if dtype is None: + if builtin_all(isinstance(i, int) for i in [start, stop, step]): + dtype = torch.int64 + else: + dtype = torch.float32 + return torch.empty(0, dtype=dtype, device=device, **kwargs) + return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs) # torch.full does not accept an int size # https://github.com/pytorch/pytorch/issues/70906 @@ -288,5 +311,5 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', - 'subtract', 'max', 'min', 'prod', 'any', 'all', 'full', + 'subtract', 'max', 'min', 'prod', 'any', 'all', 'arange', 'full', 'expand_dims', 'astype', 'broadcast_arrays'] From c3d933491ebc2ce395e4bcede327ffd339324d71 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 13:48:43 -0700 Subject: [PATCH 18/90] Add a wrapper for torch.eye --- array_api_compat/torch/_aliases.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 24744e28..984ab648 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -282,6 +282,23 @@ def arange(start: Union[int, float], return torch.empty(0, dtype=dtype, device=device, **kwargs) return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs) +# torch.eye does not accept None as a default for the second argument and +# doesn't support off-diagonals (https://github.com/pytorch/pytorch/issues/70910) +def eye(n_rows: int, + n_cols: Optional[int] = None, + /, + *, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if n_cols is None: + n_cols = n_rows + z = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs) + if abs(k) <= n_rows + n_cols: + z.diagonal(k).fill_(1) + return z + # torch.full does not accept an int size # https://github.com/pytorch/pytorch/issues/70906 def full(shape: Union[int, Tuple[int, ...]], @@ -311,5 +328,5 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', - 'subtract', 'max', 'min', 'prod', 'any', 'all', 'arange', 'full', - 'expand_dims', 'astype', 'broadcast_arrays'] + 'subtract', 'max', 'min', 'prod', 'any', 'all', 'arange', 'eye', + 'full', 'expand_dims', 'astype', 'broadcast_arrays'] From f4d6df1bdba8a5ea4350dd5a87374751cbade2bb Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 14:03:03 -0700 Subject: [PATCH 19/90] Add pytorch linspace wrapper --- array_api_compat/torch/_aliases.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 984ab648..b8576f20 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -299,6 +299,20 @@ def eye(n_rows: int, z.diagonal(k).fill_(1) return z +# torch.linspace doesn't have the endpoint parameter +def linspace(start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + endpoint: bool = True, + **kwargs) -> array: + if 
not endpoint: + return torch.linspace(start, stop, num+1, dtype=dtype, device=device, **kwargs)[:-1] + return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs) + # torch.full does not accept an int size # https://github.com/pytorch/pytorch/issues/70906 def full(shape: Union[int, Tuple[int, ...]], @@ -329,4 +343,4 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', 'any', 'all', 'arange', 'eye', - 'full', 'expand_dims', 'astype', 'broadcast_arrays'] + 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays'] From 14b651918a77003df81f3324fec2d68cf2b747d1 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 14:29:55 -0700 Subject: [PATCH 20/90] Add torch squeeze wrapper --- array_api_compat/torch/_aliases.py | 33 ++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index b8576f20..2941ea91 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -259,6 +259,34 @@ def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep # torch.all doesn't return bool for uint8 return torch.all(x, axis, keepdims=keepdims).to(torch.bool) +# torch.concat doesn't support dim=None +# https://github.com/pytorch/pytorch/issues/70925 +def concat(arrays: Union[Tuple[array, ...], List[array]], + /, + *, + axis: Optional[int] = 0, + **kwargs) -> array: + if axis is None: + arrays = tuple(ar.ravel() for ar in arrays) + axis = 0 + return torch.concat(arrays, axis, **kwargs) + +# torch.squeeze only accepts int dim and doesn't require it +# https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was +# added at https://github.com/pytorch/pytorch/pull/89017. +def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: + if isinstance(axis, int): + axis = (axis,) + for a in axis: + if x.shape[a] != 1: + raise ValueError("squeezed dimensions must be equal to 1") + axes = _normalize_axes(axis, x.ndim) + # Remove this once pytorch 1.14 is released with the above PR #89017. 
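+    # Each single-axis squeeze shifts the later axes left by one, so
+    # adjust the remaining positions up front.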
+ sequence = [a - i for i, a in enumerate(axes)] + for a in sequence: + x = torch.squeeze(x, a) + return x + # torch.arange doesn't support returning empty arrays # (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some # keyword argument combinations @@ -342,5 +370,6 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', - 'subtract', 'max', 'min', 'prod', 'any', 'all', 'arange', 'eye', - 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays'] + 'subtract', 'max', 'min', 'prod', 'any', 'all', 'concat', + 'squeeze', 'arange', 'eye', 'linspace', 'full', 'expand_dims', + 'astype', 'broadcast_arrays'] From db3f579f2ee03b076721f426b9dcb8e55a4693ee Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 14:43:27 -0700 Subject: [PATCH 21/90] Add torch flip and roll wrappers --- array_api_compat/torch/_aliases.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 2941ea91..ce6ea2b3 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -287,6 +287,19 @@ def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: x = torch.squeeze(x, a) return x +# The axis parameter doesn't work for flip() and roll() +# https://github.com/pytorch/pytorch/issues/71210. Also torch.flip() doesn't +# accept axis=None +def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array: + if axis is None: + axis = tuple(range(x.ndim-1, -1, -1)) + # torch.flip doesn't accept dim as an int but the method does + # https://github.com/pytorch/pytorch/issues/18095 + return x.flip(axis) + +def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array: + return torch.roll(x, shift, axis) + # torch.arange doesn't support returning empty arrays # (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some # keyword argument combinations @@ -371,5 +384,5 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', 'any', 'all', 'concat', - 'squeeze', 'arange', 'eye', 'linspace', 'full', 'expand_dims', - 'astype', 'broadcast_arrays'] + 'squeeze', 'flip', 'roll', 'arange', 'eye', 'linspace', 'full', + 'expand_dims', 'astype', 'broadcast_arrays'] From 27b7e8caa5ee1783a23adbf79ea00c089a3bb67c Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 14:47:22 -0700 Subject: [PATCH 22/90] Add a torch wrapper for nonzero --- array_api_compat/torch/_aliases.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index ce6ea2b3..535ffaa5 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -300,6 +300,9 @@ def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array: return torch.roll(x, shift, axis) +def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]: + return torch.nonzero(x, as_tuple=True, **kwargs) + # torch.arange doesn't support 
returning empty arrays # (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some # keyword argument combinations @@ -384,5 +387,5 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', 'any', 'all', 'concat', - 'squeeze', 'flip', 'roll', 'arange', 'eye', 'linspace', 'full', - 'expand_dims', 'astype', 'broadcast_arrays'] + 'squeeze', 'flip', 'roll', 'nonzero', 'arange', 'eye', 'linspace', + 'full', 'expand_dims', 'astype', 'broadcast_arrays'] From e8866449b6d584cf5c0e099ed963a676caf38d6e Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 14:49:25 -0700 Subject: [PATCH 23/90] Fix pyflakes warning --- array_api_compat/torch/_aliases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 535ffaa5..f5e909cc 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -112,7 +112,7 @@ def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: # cross-kind promotion. return torch.result_type(x, y) -def can_cast(from_: Union[dtype, array], to: Dtype, /) -> bool: +def can_cast(from_: Union[Dtype, array], to: Dtype, /) -> bool: if not isinstance(from_, torch.dtype): from_ = from_.dtype return torch.can_cast(from_, to) From fd5d1793ac20272f12669b8e16db329d17bb5da2 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 24 Jan 2023 14:50:36 -0700 Subject: [PATCH 24/90] Add torch wrapper for where --- array_api_compat/torch/_aliases.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index f5e909cc..180332e9 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -303,6 +303,10 @@ def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Unio def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]: return torch.nonzero(x, as_tuple=True, **kwargs) +def where(condition: array, x1: array, x2: array, /) -> array: + x1, x2 = _fix_promotion(x1, x2) + return torch.where(condition, x1, x2) + # torch.arange doesn't support returning empty arrays # (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some # keyword argument combinations @@ -387,5 +391,5 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'prod', 'any', 'all', 'concat', - 'squeeze', 'flip', 'roll', 'nonzero', 'arange', 'eye', 'linspace', - 'full', 'expand_dims', 'astype', 'broadcast_arrays'] + 'squeeze', 'flip', 'roll', 'nonzero', 'where', 'arange', 'eye', + 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays'] From a5f3253e5b9a3aee445af31c3fa6db2717e0defc Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 1 Feb 2023 17:40:32 -0700 Subject: [PATCH 25/90] Add sort wrapper to torch --- array_api_compat/torch/_aliases.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 180332e9..a3b04328 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -163,6 +163,11 @@ def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep return torch.clone(x) 
return torch.amin(x, axis, keepdims=keepdims) +# torch.sort also returns a tuple +# https://github.com/pytorch/pytorch/issues/70921 +def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> array: + return torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs).values + def _normalize_axes(axis, ndim): axes = [] lower, upper = -ndim, ndim - 1 @@ -390,6 +395,6 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', - 'subtract', 'max', 'min', 'prod', 'any', 'all', 'concat', + 'subtract', 'max', 'min', 'sort', 'prod', 'any', 'all', 'concat', 'squeeze', 'flip', 'roll', 'nonzero', 'where', 'arange', 'eye', 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays'] From 4a71e63a5aaf3290356721a84b5a8092e0d30834 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 1 Feb 2023 17:41:22 -0700 Subject: [PATCH 26/90] Pass kwargs through some torch wrappers --- array_api_compat/torch/_aliases.py | 44 ++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index a3b04328..c2861e87 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -189,7 +189,13 @@ def _apply_keepdims(x, ndim, keepdims): return x[(None,)*ndim] return x -def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype: Optional[Dtype] = None, keepdims: bool = False) -> array: +def prod(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: # torch.prod doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). x = torch.asarray(x) @@ -198,21 +204,26 @@ def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dty axes = _normalize_axes(axis, x.ndim) for i, a in enumerate(axes): if keepdims: - x = torch.prod(x, a, dtype=dtype) + x = torch.prod(x, a, dtype=dtype, **kwargs) x = torch.unsqueeze(x, a) else: - x = torch.prod(x, a - i, dtype=dtype) + x = torch.prod(x, a - i, dtype=dtype, **kwargs) return x if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) - res = torch.prod(x, dtype=dtype) + res = torch.prod(x, dtype=dtype, **kwargs) res = _apply_keepdims(res, ndim, keepdims) return res - return torch.prod(x, axis, dtype=dtype, keepdims=keepdims) + return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) -def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: +def any(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: # torch.any doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). 
x = torch.asarray(x) @@ -223,22 +234,27 @@ def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep axes = _normalize_axes(axis, x.ndim) for i, a in enumerate(axes): if keepdims: - x = torch.any(x, a) + x = torch.any(x, a, **kwargs) x = torch.unsqueeze(x, a) else: - x = torch.any(x, a - i) + x = torch.any(x, a - i, **kwargs) return x.to(torch.bool) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) - res = torch.any(x) + res = torch.any(x, **kwargs) res = _apply_keepdims(res, ndim, keepdims) return res.to(torch.bool) # torch.any doesn't return bool for uint8 return torch.any(x, axis, keepdims=keepdims).to(torch.bool) -def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: +def all(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: # torch.all doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). x = torch.asarray(x) @@ -246,18 +262,18 @@ def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep if axis == (): return x.to(torch.bool) if isinstance(axis, tuple): - axes = _normalize_axes(axis, x.ndim) + axes = _normalize_axes(axis, ndim) for i, a in enumerate(axes): if keepdims: - x = torch.all(x, a) + x = torch.all(x, a, **kwargs) x = torch.unsqueeze(x, a) else: - x = torch.all(x, a - i) + x = torch.all(x, a - i, **kwargs) return x.to(torch.bool) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) - res = torch.all(x) + res = torch.all(x, **kwargs) res = _apply_keepdims(res, ndim, keepdims) return res.to(torch.bool) From 157fc1eb1ef229ca330c42a5a8ceb1f65c9adfab Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 1 Feb 2023 18:13:30 -0700 Subject: [PATCH 27/90] Add torch mean(), std(), and var() wrappers --- array_api_compat/torch/_aliases.py | 78 ++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index c2861e87..30c88408 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -280,6 +280,77 @@ def all(x: array, # torch.all doesn't return bool for uint8 return torch.all(x, axis, keepdims=keepdims).to(torch.bool) +def mean(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.mean(x, **kwargs) + res = _apply_keepdims(res, x.ndim, keepdims) + return res + return torch.mean(x, axis, keepdims=keepdims, **kwargs) + +def std(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. 
+ + # if isinstance(correction, float): + # correction = int(correction) + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.std(x, tuple(range(x.ndim)), correction=correction, **kwargs) + res = _apply_keepdims(res, x.ndim, keepdims) + return res + return torch.std(x, axis, correction=correction, keepdims=keepdims, **kwargs) + +def var(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. + + # if isinstance(correction, float): + # correction = int(correction) + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs) + res = _apply_keepdims(res, x.ndim, keepdims) + return res + return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs) + # torch.concat doesn't support dim=None # https://github.com/pytorch/pytorch/issues/70925 def concat(arrays: Union[Tuple[array, ...], List[array]], @@ -411,6 +482,7 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', - 'subtract', 'max', 'min', 'sort', 'prod', 'any', 'all', 'concat', - 'squeeze', 'flip', 'roll', 'nonzero', 'where', 'arange', 'eye', - 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays'] + 'subtract', 'max', 'min', 'sort', 'prod', 'any', 'all', 'mean', + 'std', 'var', 'concat', 'squeeze', 'flip', 'roll', 'nonzero', 'where', + 'arange', 'eye', 'linspace', 'full', 'expand_dims', 'astype', + 'broadcast_arrays'] From c3efe6ae4d46ba881089687e7ec0ec1ed757b103 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 2 Feb 2023 17:51:25 -0700 Subject: [PATCH 28/90] Add torch sum() and prod() wrappers --- array_api_compat/torch/_aliases.py | 53 ++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 6 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 30c88408..08df1c66 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -196,10 +196,21 @@ def prod(x: array, dtype: Optional[Dtype] = None, keepdims: bool = False, **kwargs) -> array: - # torch.prod doesn't support multiple axes - # (https://github.com/pytorch/pytorch/issues/56586). x = torch.asarray(x) ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. Separate from the logic + # below because it still needs to upcast. + if axis == (): + if dtype is None: + if x.dtype in [torch.int8, torch.int16, torch.int32]: + return x.to(torch.int64) + # we can't upcast uint8 because there is no torch.uint64 + return x.clone() + return x.to(dtype) + + # torch.prod doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
if isinstance(axis, tuple): axes = _normalize_axes(axis, x.ndim) for i, a in enumerate(axes): @@ -218,6 +229,36 @@ def prod(x: array, return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def sum(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. + # Make sure it upcasts. + if axis == (): + if dtype is None: + if x.dtype in [torch.int8, torch.int16, torch.int32]: + return x.to(torch.int64) + # we can't upcast uint8 because there is no torch.uint64 + return x.clone() + return x.to(dtype) + + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.sum(x, dtype=dtype, **kwargs) + res = _apply_keepdims(res, ndim, keepdims) + return res + + return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + def any(x: array, /, *, @@ -482,7 +523,7 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', - 'subtract', 'max', 'min', 'sort', 'prod', 'any', 'all', 'mean', - 'std', 'var', 'concat', 'squeeze', 'flip', 'roll', 'nonzero', 'where', - 'arange', 'eye', 'linspace', 'full', 'expand_dims', 'astype', - 'broadcast_arrays'] + 'subtract', 'max', 'min', 'sort', 'prod', 'sum', 'any', 'all', + 'mean', 'std', 'var', 'concat', 'squeeze', 'flip', 'roll', + 'nonzero', 'where', 'arange', 'eye', 'linspace', 'full', + 'expand_dims', 'astype', 'broadcast_arrays'] From ed46247f82accfaa02b6bd02f93ee9287e4ff128 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 3 Feb 2023 18:27:17 -0700 Subject: [PATCH 29/90] Add unique_* wrappers to torch --- array_api_compat/torch/_aliases.py | 37 +++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 08df1c66..1ff58e0d 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -3,6 +3,8 @@ from functools import wraps from builtins import all as builtin_all +from ..common._aliases import (UniqueAllResult, UniqueCountsResult, UniqueInverseResult) + from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import List, Optional, Tuple, Union @@ -518,6 +520,38 @@ def broadcast_arrays(*arrays: array) -> List[array]: shape = torch.broadcast_shapes(*[a.shape for a in arrays]) return [torch.broadcast_to(a, shape) for a in arrays] +# https://github.com/pytorch/pytorch/issues/70920 +def unique_all(x: array) -> UniqueAllResult: + values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) + # torch.unique doesn't support returning indices. We use this workaround + # suggested at + # https://github.com/pytorch/pytorch/issues/36748#issuecomment-619514810. + # Note that this makes use of an undocumented behavior of scatter, that it + # returns elements in order. + perm = torch.arange(inverse_indices.numel() - 1, -1, -1, dtype=inverse_indices.dtype, device=inverse_indices.device) + _inverse = inverse_indices.reshape((inverse_indices.numel(),)).flip([0]) + indices = torch.zeros((values.numel(),), dtype=_inverse.dtype).scatter_(0, _inverse, perm) + + # torch.unique incorrectly gives a 0 count for nan values. 
+ # https://github.com/pytorch/pytorch/issues/94106 + counts[torch.isnan(values)] = 1 + return UniqueAllResult(values, indices, inverse_indices, counts) + +def unique_counts(x: array) -> UniqueCountsResult: + values, counts = torch.unique(x, return_counts=True) + + # torch.unique incorrectly gives a 0 count for nan values. + # https://github.com/pytorch/pytorch/issues/94106 + counts[torch.isnan(values)] = 1 + return UniqueCountsResult(values, counts) + +def unique_inverse(x: array) -> UniqueInverseResult: + values, inverse = torch.unique(x, return_inverse=True) + return UniqueInverseResult(values, inverse) + +def unique_values(x: array) -> array: + return torch.unique(x) + __all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', @@ -526,4 +560,5 @@ def broadcast_arrays(*arrays: array) -> List[array]: 'subtract', 'max', 'min', 'sort', 'prod', 'sum', 'any', 'all', 'mean', 'std', 'var', 'concat', 'squeeze', 'flip', 'roll', 'nonzero', 'where', 'arange', 'eye', 'linspace', 'full', - 'expand_dims', 'astype', 'broadcast_arrays'] + 'expand_dims', 'astype', 'broadcast_arrays', 'unique_all', + 'unique_counts', 'unique_inverse', 'unique_values'] From eaf53587b2fa969170e050d3b195e7321b93aad5 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 3 Feb 2023 19:18:46 -0700 Subject: [PATCH 30/90] Just raise NotImplementedError in pytorch unique_all() --- array_api_compat/torch/_aliases.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 1ff58e0d..08c141f9 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -523,15 +523,11 @@ def broadcast_arrays(*arrays: array) -> List[array]: # https://github.com/pytorch/pytorch/issues/70920 def unique_all(x: array) -> UniqueAllResult: values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) - # torch.unique doesn't support returning indices. We use this workaround - # suggested at - # https://github.com/pytorch/pytorch/issues/36748#issuecomment-619514810. - # Note that this makes use of an undocumented behavior of scatter, that it - # returns elements in order. - perm = torch.arange(inverse_indices.numel() - 1, -1, -1, dtype=inverse_indices.dtype, device=inverse_indices.device) - _inverse = inverse_indices.reshape((inverse_indices.numel(),)).flip([0]) - indices = torch.zeros((values.numel(),), dtype=_inverse.dtype).scatter_(0, _inverse, perm) - + # torch.unique doesn't support returning indices. + # https://github.com/pytorch/pytorch/issues/36748. The workaround + # suggested in that issue doesn't actually function correctly (it relies + # on non-deterministic behavior of scatter()). + raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)") # torch.unique incorrectly gives a 0 count for nan values. 
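For reference, first-occurrence indices can also be recovered deterministically, without scatter_: a stable sort of the inverse indices keeps the original order within each group, so each group's first slot in the sorted order is that value's first occurrence. A sketch of that alternative (not part of this patch; torch.sort has accepted stable=True since torch 1.9):

    import torch

    x = torch.tensor([3., 1., 3., 2., 1.])
    values, inverse, counts = torch.unique(
        x, return_inverse=True, return_counts=True)
    order = torch.sort(inverse, stable=True).indices
    # Exclusive cumulative sum of the counts gives each group's start slot.
    starts = torch.cat([torch.zeros(1, dtype=torch.long),
                        counts.cumsum(0)[:-1]])
    indices = order[starts]
    assert torch.equal(x[indices], values)  # indices of first occurrences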
# https://github.com/pytorch/pytorch/issues/94106 counts[torch.isnan(values)] = 1 From cd25f472e3dd5fa6a56f08ebb5ac69bea90fe975 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 9 Feb 2023 17:32:40 -0700 Subject: [PATCH 31/90] Fix to_device for pytorch tensors --- array_api_compat/common/_helpers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/array_api_compat/common/_helpers.py b/array_api_compat/common/_helpers.py index 69150b69..6a4a43fd 100644 --- a/array_api_compat/common/_helpers.py +++ b/array_api_compat/common/_helpers.py @@ -158,7 +158,7 @@ def _torch_to_device(x, device, /, stream=None): raise NotImplementedError return x.to(device) -def to_device(x: "Array", device: "Device", /, *, stream: Optional[Union[int, Any]] = None) -> "Array": +def to_device(x: "Array", device: "Device", /, *, stream: "Optional[Union[int, Any]]" = None) -> "Array": """ Copy the array from the device on which it currently resides to the specified ``device``. @@ -189,7 +189,7 @@ def to_device(x: "Array", device: "Device", /, *, stream: Optional[Union[int, An # cupy does not yet have to_device return _cupy_to_device(x, device, stream=stream) elif _is_torch_array(x): - return _torch_to_device(x) + return _torch_to_device(x, device, stream=stream) return x.to_device(device, stream=stream) def size(x): From 98ed0b2647f0ed542667d578062701d9b8d0e026 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 9 Feb 2023 17:32:51 -0700 Subject: [PATCH 32/90] Restrict the names imported from torch into the compat submodule --- array_api_compat/torch/__init__.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/array_api_compat/torch/__init__.py b/array_api_compat/torch/__init__.py index bdbc6b39..7dfdf482 100644 --- a/array_api_compat/torch/__init__.py +++ b/array_api_compat/torch/__init__.py @@ -3,8 +3,13 @@ # Several names are not included in the above import * import torch for n in dir(torch): - if not n.startswith('_'): - exec(n + ' = torch.' + n) + if (n.startswith('_') + or n.endswith('_') + or 'cuda' in n + or 'cpu' in n + or 'backward' in n): + continue + exec(n + ' = torch.' + n) # These imports may overwrite names from the import * above. from ._aliases import * From be4b5343a4708021236af1499c5bd6d4e9487949 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 9 Feb 2023 17:33:24 -0700 Subject: [PATCH 33/90] Allow torch sum and prod to upcast uint8 to int64 --- array_api_compat/torch/_aliases.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 08c141f9..ebcf9d5b 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -205,9 +205,11 @@ def prod(x: array, # below because it still needs to upcast. if axis == (): if dtype is None: - if x.dtype in [torch.int8, torch.int16, torch.int32]: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. + if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: return x.to(torch.int64) - # we can't upcast uint8 because there is no torch.uint64 return x.clone() return x.to(dtype) @@ -246,9 +248,11 @@ def sum(x: array, # Make sure it upcasts. 
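The axis == () case handled just below performs no reduction, but the spec's type promotion still applies; torch has no uint64, so uint8 is promoted to int64 along with the other small integer types. For example (assuming the sum wrapper defined above is re-exported from the compat torch namespace):

    import torch
    from array_api_compat.torch import sum as xp_sum  # assumed re-export

    x = torch.ones(3, dtype=torch.uint8)
    res = xp_sum(x, axis=())  # no axes reduced; dtype promotion only
    assert res.shape == x.shape and res.dtype == torch.int64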
if axis == (): if dtype is None: - if x.dtype in [torch.int8, torch.int16, torch.int32]: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. + if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: return x.to(torch.int64) - # we can't upcast uint8 because there is no torch.uint64 return x.clone() return x.to(dtype) From ecda0175bfd3462af3ef941ebbe0d62d4da7a6a5 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 9 Feb 2023 17:33:37 -0700 Subject: [PATCH 34/90] Don't unnecessarily flip the axes in flip() --- array_api_compat/torch/_aliases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index ebcf9d5b..0b249aea 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -431,7 +431,7 @@ def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: # accept axis=None def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array: if axis is None: - axis = tuple(range(x.ndim-1, -1, -1)) + axis = tuple(range(x.ndim)) # torch.flip doesn't accept dim as an int but the method does # https://github.com/pytorch/pytorch/issues/18095 return x.flip(axis) From 3ccee1b57fab25f9f5ec4ccd34a46b7114ed0018 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 9 Feb 2023 17:33:51 -0700 Subject: [PATCH 35/90] Comment out dead code in the torch unique_all() wrapper --- array_api_compat/torch/_aliases.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 0b249aea..cd5291a9 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -526,16 +526,17 @@ def broadcast_arrays(*arrays: array) -> List[array]: # https://github.com/pytorch/pytorch/issues/70920 def unique_all(x: array) -> UniqueAllResult: - values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) # torch.unique doesn't support returning indices. # https://github.com/pytorch/pytorch/issues/36748. The workaround # suggested in that issue doesn't actually function correctly (it relies # on non-deterministic behavior of scatter()). raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)") - # torch.unique incorrectly gives a 0 count for nan values. - # https://github.com/pytorch/pytorch/issues/94106 - counts[torch.isnan(values)] = 1 - return UniqueAllResult(values, indices, inverse_indices, counts) + + # values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) + # # torch.unique incorrectly gives a 0 count for nan values. 
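A note on the flip() change above: when every axis is flipped, the order in which the axes are listed does not matter, so the plain tuple(range(x.ndim)) is equivalent to the reversed tuple it replaces. For instance:

    import torch

    x = torch.arange(6).reshape(2, 3)
    # Reversing rows then columns equals reversing columns then rows.
    assert torch.equal(x.flip((0, 1)), x.flip((1, 0)))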
+ # # https://github.com/pytorch/pytorch/issues/94106 + # counts[torch.isnan(values)] = 1 + # return UniqueAllResult(values, indices, inverse_indices, counts) def unique_counts(x: array) -> UniqueCountsResult: values, counts = torch.unique(x, return_counts=True) From 7699755b89ee6730a3c9dba97e106c196c09706a Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 9 Feb 2023 19:04:16 -0700 Subject: [PATCH 36/90] Use flatten instead of ravel --- array_api_compat/torch/_aliases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index cd5291a9..be32e97d 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -406,7 +406,7 @@ def concat(arrays: Union[Tuple[array, ...], List[array]], axis: Optional[int] = 0, **kwargs) -> array: if axis is None: - arrays = tuple(ar.ravel() for ar in arrays) + arrays = tuple(ar.flatten() for ar in arrays) axis = 0 return torch.concat(arrays, axis, **kwargs) From d38bad5c6c258e5faecd9297eea78d00041355b6 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 10 Feb 2023 01:27:52 -0700 Subject: [PATCH 37/90] Improve some error messages --- array_api_compat/torch/_aliases.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index be32e97d..bdba9fec 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -172,6 +172,9 @@ def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool def _normalize_axes(axis, ndim): axes = [] + if ndim == 0 and axis: + # Better error message in this case + raise IndexError(f"Dimension out of range: {axis[0]}") lower, upper = -ndim, ndim - 1 for a in axis: if a < lower or a > upper: @@ -180,12 +183,11 @@ def _normalize_axes(axis, ndim): if a < 0: a = a + ndim if a in axes: - # Match torch error message but use IndexError instead of RuntimeError - raise IndexError(f"dim {a} appears multiple times in the list of dims") + # Use IndexError instead of RuntimeError, and "axis" instead of "dim" + raise IndexError(f"Axis {a} appears multiple times in the list of axes") axes.append(a) return sorted(axes) - def _apply_keepdims(x, ndim, keepdims): if keepdims: return x[(None,)*ndim] From 24c0ea377b0891ddcb0135f882c038fe0171ba8c Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 13 Feb 2023 16:17:23 -0700 Subject: [PATCH 38/90] Use a better function name and use unsqueeze instead of None indexing --- array_api_compat/torch/_aliases.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index bdba9fec..bd38fdf7 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -188,9 +188,13 @@ def _normalize_axes(axis, ndim): axes.append(a) return sorted(axes) -def _apply_keepdims(x, ndim, keepdims): +def _axis_none_keepdims(x, ndim, keepdims): + # Apply keepdims when axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + # Note that this is only valid for the axis=None case. 
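The change just below swaps None-indexing for repeated unsqueeze. For the 0-d tensors this helper receives (full reductions with axis=None), the two spellings produce the same shape, e.g.:

    import torch

    res = torch.tensor(6.0)  # 0-d result of fully reducing a 2-D tensor
    via_index = res[(None,) * 2]
    via_unsqueeze = torch.unsqueeze(torch.unsqueeze(res, 0), 0)
    assert via_index.shape == via_unsqueeze.shape == (1, 1)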
if keepdims: - return x[(None,)*ndim] + for i in range(ndim): + x = torch.unsqueeze(x, 0) return x def prod(x: array, @@ -230,7 +234,7 @@ def prod(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.prod(x, dtype=dtype, **kwargs) - res = _apply_keepdims(res, ndim, keepdims) + res = _axis_none_keepdims(res, ndim, keepdims) return res return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) @@ -262,7 +266,7 @@ def sum(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.sum(x, dtype=dtype, **kwargs) - res = _apply_keepdims(res, ndim, keepdims) + res = _axis_none_keepdims(res, ndim, keepdims) return res return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) @@ -292,7 +296,7 @@ def any(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.any(x, **kwargs) - res = _apply_keepdims(res, ndim, keepdims) + res = _axis_none_keepdims(res, ndim, keepdims) return res.to(torch.bool) # torch.any doesn't return bool for uint8 @@ -323,7 +327,7 @@ def all(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.all(x, **kwargs) - res = _apply_keepdims(res, ndim, keepdims) + res = _axis_none_keepdims(res, ndim, keepdims) return res.to(torch.bool) # torch.all doesn't return bool for uint8 @@ -342,7 +346,7 @@ def mean(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.mean(x, **kwargs) - res = _apply_keepdims(res, x.ndim, keepdims) + res = _axis_none_keepdims(res, x.ndim, keepdims) return res return torch.mean(x, axis, keepdims=keepdims, **kwargs) @@ -369,7 +373,7 @@ def std(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.std(x, tuple(range(x.ndim)), correction=correction, **kwargs) - res = _apply_keepdims(res, x.ndim, keepdims) + res = _axis_none_keepdims(res, x.ndim, keepdims) return res return torch.std(x, axis, correction=correction, keepdims=keepdims, **kwargs) @@ -396,7 +400,7 @@ def var(x: array, # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs) - res = _apply_keepdims(res, x.ndim, keepdims) + res = _axis_none_keepdims(res, x.ndim, keepdims) return res return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs) From 48d1ae14b14a1cb55c881eb5f21dd4ccc98d901c Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 17 Feb 2023 20:03:25 -0700 Subject: [PATCH 39/90] Add pytorch-xfails.txt (still need to validate) --- pytorch-xfails.txt | 151 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 pytorch-xfails.txt diff --git a/pytorch-xfails.txt b/pytorch-xfails.txt new file mode 100644 index 00000000..560bce5c --- /dev/null +++ b/pytorch-xfails.txt @@ -0,0 +1,151 @@ +array_api_tests/test_array_object.py::test_getitem +array_api_tests/test_array_object.py::test_setitem +array_api_tests/test_array_object.py::test_getitem_masking +array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint16)] +array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint32)] +array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint64)] 
+array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint16)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint32)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint64)] +array_api_tests/test_creation_functions.py::test_arange +array_api_tests/test_creation_functions.py::test_linspace +array_api_tests/test_creation_functions.py::test_meshgrid +array_api_tests/test_data_type_functions.py::test_iinfo[uint16] +array_api_tests/test_data_type_functions.py::test_iinfo[uint32] +array_api_tests/test_data_type_functions.py::test_iinfo[uint64] +array_api_tests/test_has_names.py::test_has_names[linalg-matrix_transpose] +array_api_tests/test_has_names.py::test_has_names[linalg-outer] +array_api_tests/test_has_names.py::test_has_names[linalg-tensordot] +array_api_tests/test_has_names.py::test_has_names[linalg-trace] +array_api_tests/test_has_names.py::test_has_names[linear_algebra-matrix_transpose] +array_api_tests/test_has_names.py::test_has_names[linear_algebra-vecdot] +array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__] +array_api_tests/test_has_names.py::test_has_names[array_method-to_device] +array_api_tests/test_linalg.py::test_cross +array_api_tests/test_linalg.py::test_eigvalsh +array_api_tests/test_linalg.py::test_matmul +array_api_tests/test_linalg.py::test_matrix_power +array_api_tests/test_linalg.py::test_matrix_transpose +array_api_tests/test_linalg.py::test_outer +array_api_tests/test_linalg.py::test_solve +array_api_tests/test_linalg.py::test_tensordot +array_api_tests/test_linalg.py::test_trace +array_api_tests/test_linalg.py::test_vecdot +array_api_tests/test_manipulation_functions.py::test_concat +array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__and__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[__lshift__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[__or__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[bitwise_right_shift(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__rshift__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__irshift__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__xor__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_equal[__eq__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_greater[__gt__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_greater_equal[__ge__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_less[__lt__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_less_equal[__le__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_log1p +array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__mul__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__pow__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)] 
+array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x1, x2)] +array_api_tests/test_searching_functions.py::test_nonzero +array_api_tests/test_set_functions.py::test_unique_all +array_api_tests/test_set_functions.py::test_unique_counts +array_api_tests/test_set_functions.py::test_unique_inverse +array_api_tests/test_set_functions.py::test_unique_values +array_api_tests/test_signatures.py::test_func_signature[floor_divide] +array_api_tests/test_signatures.py::test_func_signature[remainder] +array_api_tests/test_signatures.py::test_func_signature[matrix_transpose] +array_api_tests/test_signatures.py::test_func_signature[tensordot] +array_api_tests/test_signatures.py::test_func_signature[vecdot] +array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_transpose] +array_api_tests/test_signatures.py::test_extension_func_signature[linalg.outer] +array_api_tests/test_signatures.py::test_extension_func_signature[linalg.tensordot] +array_api_tests/test_signatures.py::test_extension_func_signature[linalg.trace] +array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] +array_api_tests/test_signatures.py::test_array_method_signature[to_device] +array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] +array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +infinity and isfinite(x2_i)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is +infinity) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__add__((x1_i is +0 or x1_i == -0) and isfinite(x2_i) and x2_i != 0) -> x2_i] +array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and (x2_i is +0 or x2_i == -0)) -> x1_i] +array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is +0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is +0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and 
isfinite(x2_i) and x2_i > 0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] +array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is +0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is +0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> +0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) > 1 and x2_i is +infinity) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) > 1 and x2_i is -infinity) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is +infinity) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is -infinity) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i > 0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i < 0) -> +infinity] 
+array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_iop[__iadd__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_empty_arrays[std] +array_api_tests/test_special_cases.py::test_empty_arrays[var] +array_api_tests/test_special_cases.py::test_nan_propagation[std] +array_api_tests/test_special_cases.py::test_nan_propagation[var] +array_api_tests/test_statistical_functions.py::test_prod +array_api_tests/test_statistical_functions.py::test_std +array_api_tests/test_statistical_functions.py::test_sum +array_api_tests/test_statistical_functions.py::test_var From 866647d3f86555ac2968097c8ac7a12a071a0681 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 17 Feb 2023 20:38:29 -0700 Subject: [PATCH 40/90] Move main namespace linear algebra helpers to _aliases.py --- array_api_compat/common/_aliases.py | 40 +++++++++++++++++++++++++++-- array_api_compat/common/_linalg.py | 34 +----------------------- 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/array_api_compat/common/_aliases.py b/array_api_compat/common/_aliases.py index 95885080..8e00d806 100644 --- 
a/array_api_compat/common/_aliases.py +++ b/array_api_compat/common/_aliases.py @@ -6,7 +6,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Tuple, Union, List + from typing import Optional, Sequence, Tuple, Union, List from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol from typing import NamedTuple @@ -408,7 +408,43 @@ def trunc(x: ndarray, /, xp, **kwargs) -> ndarray: return x return xp.trunc(x, **kwargs) +# linear algebra functions + +def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.matmul(x1, x2, **kwargs) + +# Unlike transpose, matrix_transpose only transposes the last two axes. +def matrix_transpose(x: ndarray, /, xp) -> ndarray: + if x.ndim < 2: + raise ValueError("x must be at least 2-dimensional for matrix_transpose") + return xp.swapaxes(x, -1, -2) + +def tensordot(x1: ndarray, + x2: ndarray, + /, + xp, + *, + axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, + **kwargs, +) -> ndarray: + return xp.tensordot(x1, x2, axes=axes, **kwargs) + +def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: + ndim = max(x1.ndim, x2.ndim) + x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) + x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) + if x1_shape[axis] != x2_shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + x1_, x2_ = xp.broadcast_arrays(x1, x2) + x1_ = xp.moveaxis(x1_, axis, -1) + x2_ = xp.moveaxis(x2_, axis, -1) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + __all__ = ['UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', 'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort', - 'sort', 'sum', 'prod', 'ceil', 'floor', 'trunc'] + 'sort', 'sum', 'prod', 'ceil', 'floor', 'trunc', 'matmul', + 'matrix_transpose', 'tensordot', 'vecdot'] diff --git a/array_api_compat/common/_linalg.py b/array_api_compat/common/_linalg.py index c42879d6..07ed1b31 100644 --- a/array_api_compat/common/_linalg.py +++ b/array_api_compat/common/_linalg.py @@ -7,28 +7,16 @@ from numpy.core.numeric import normalize_axis_tuple +from .._aliases import matmul, matrix_transpose, tensordot, vecdot from .._internal import get_xp # These are in the main NumPy namespace but not in numpy.linalg def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray: return xp.cross(x1, x2, axis=axis, **kwargs) -def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: - return xp.matmul(x1, x2, **kwargs) - def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: return xp.outer(x1, x2, **kwargs) -def tensordot(x1: ndarray, - x2: ndarray, - /, - xp, - *, - axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, - **kwargs, -) -> ndarray: - return xp.tensordot(x1, x2, axes=axes, **kwargs) - class EighResult(NamedTuple): eigenvalues: ndarray eigenvectors: ndarray @@ -103,31 +91,11 @@ def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **k def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray: return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) -# Unlike transpose, matrix_transpose only transposes the last two axes. 
-def matrix_transpose(x: ndarray, /, xp) -> ndarray: - if x.ndim < 2: - raise ValueError("x must be at least 2-dimensional for matrix_transpose") - return xp.swapaxes(x, -1, -2) - # svdvals is not in NumPy (but it is in SciPy). It is equivalent to # xp.linalg.svd(compute_uv=False). def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]: return xp.linalg.svd(x, compute_uv=False) -def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: - ndim = max(x1.ndim, x2.ndim) - x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) - x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) - if x1_shape[axis] != x2_shape[axis]: - raise ValueError("x1 and x2 must have the same size along the given axis") - - x1_, x2_ = xp.broadcast_arrays(x1, x2) - x1_ = xp.moveaxis(x1_, axis, -1) - x2_ = xp.moveaxis(x2_, axis, -1) - - res = x1_[..., None, :] @ x2_[..., None] - return res[..., 0, 0] - def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray: # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or # when axis=None and the input is 2-D, so to force a vector norm, we make From c441f33e0f16eea5762743a32a96b8184b6390f7 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 17 Feb 2023 20:51:47 -0700 Subject: [PATCH 41/90] Fix main namespace linalg functions in numpy and cupy --- array_api_compat/common/_aliases.py | 3 ++- array_api_compat/common/_linalg.py | 2 +- array_api_compat/cupy/_aliases.py | 4 ++++ array_api_compat/cupy/linalg.py | 5 +---- array_api_compat/numpy/_aliases.py | 4 ++++ array_api_compat/numpy/linalg.py | 5 +---- 6 files changed, 13 insertions(+), 10 deletions(-) diff --git a/array_api_compat/common/_aliases.py b/array_api_compat/common/_aliases.py index 2a899e38..20eb6984 100644 --- a/array_api_compat/common/_aliases.py +++ b/array_api_compat/common/_aliases.py @@ -444,7 +444,8 @@ def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: return res[..., 0, 0] __all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like', - 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like', 'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', + 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like', + 'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', 'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort', 'sort', 'sum', 'prod', 'ceil', 'floor', 'trunc', 'matmul', diff --git a/array_api_compat/common/_linalg.py b/array_api_compat/common/_linalg.py index 07ed1b31..07daefd9 100644 --- a/array_api_compat/common/_linalg.py +++ b/array_api_compat/common/_linalg.py @@ -7,7 +7,7 @@ from numpy.core.numeric import normalize_axis_tuple -from .._aliases import matmul, matrix_transpose, tensordot, vecdot +from ._aliases import matmul, matrix_transpose, tensordot, vecdot from .._internal import get_xp # These are in the main NumPy namespace but not in numpy.linalg diff --git a/array_api_compat/cupy/_aliases.py b/array_api_compat/cupy/_aliases.py index 8cace32e..ce7f3780 100644 --- a/array_api_compat/cupy/_aliases.py +++ b/array_api_compat/cupy/_aliases.py @@ -57,6 +57,10 @@ ceil = get_xp(cp)(_aliases.ceil) floor = get_xp(cp)(_aliases.floor) trunc = get_xp(cp)(_aliases.trunc) +matmul = get_xp(cp)(_aliases.matmul) +matrix_transpose = get_xp(cp)(_aliases.matrix_transpose) +tensordot = get_xp(cp)(_aliases.tensordot) +vecdot = get_xp(cp)(_aliases.vecdot) 
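To illustrate the shared vecdot helper being wired up here: after broadcasting, it contracts along axis with a batched row-by-column matmul, x1_[..., None, :] @ x2_[..., None]. A usage sketch (assuming the get_xp-wrapped function is re-exported from the numpy compat namespace, as the aliases above are for cupy):

    import numpy as np
    from array_api_compat.numpy import vecdot  # assumed re-export

    x1 = np.arange(12.0).reshape(3, 4)
    x2 = np.ones(4)  # broadcast against x1 before contracting
    np.testing.assert_allclose(vecdot(x1, x2, axis=-1),
                               (x1 * x2).sum(axis=-1))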
__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', diff --git a/array_api_compat/cupy/linalg.py b/array_api_compat/cupy/linalg.py index 04c71dec..99c4cc68 100644 --- a/array_api_compat/cupy/linalg.py +++ b/array_api_compat/cupy/linalg.py @@ -10,13 +10,12 @@ from ..common import _linalg from .._internal import get_xp +from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) import cupy as cp cross = get_xp(cp)(_linalg.cross) -matmul = get_xp(cp)(_linalg.matmul) outer = get_xp(cp)(_linalg.outer) -tensordot = get_xp(cp)(_linalg.tensordot) EighResult = _linalg.EighResult QRResult = _linalg.QRResult SlogdetResult = _linalg.SlogdetResult @@ -29,9 +28,7 @@ matrix_rank = get_xp(cp)(_linalg.matrix_rank) pinv = get_xp(cp)(_linalg.pinv) matrix_norm = get_xp(cp)(_linalg.matrix_norm) -matrix_transpose = get_xp(cp)(_linalg.matrix_transpose) svdvals = get_xp(cp)(_linalg.svdvals) -vecdot = get_xp(cp)(_linalg.vecdot) vector_norm = get_xp(cp)(_linalg.vector_norm) diagonal = get_xp(cp)(_linalg.diagonal) trace = get_xp(cp)(_linalg.trace) diff --git a/array_api_compat/numpy/_aliases.py b/array_api_compat/numpy/_aliases.py index 9df2c3fb..2022b842 100644 --- a/array_api_compat/numpy/_aliases.py +++ b/array_api_compat/numpy/_aliases.py @@ -57,6 +57,10 @@ ceil = get_xp(np)(_aliases.ceil) floor = get_xp(np)(_aliases.floor) trunc = get_xp(np)(_aliases.trunc) +matmul = get_xp(np)(_aliases.matmul) +matrix_transpose = get_xp(np)(_aliases.matrix_transpose) +tensordot = get_xp(np)(_aliases.tensordot) +vecdot = get_xp(np)(_aliases.vecdot) __all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', diff --git a/array_api_compat/numpy/linalg.py b/array_api_compat/numpy/linalg.py index ac04b055..26d6e88e 100644 --- a/array_api_compat/numpy/linalg.py +++ b/array_api_compat/numpy/linalg.py @@ -3,13 +3,12 @@ from ..common import _linalg from .._internal import get_xp +from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) import numpy as np cross = get_xp(np)(_linalg.cross) -matmul = get_xp(np)(_linalg.matmul) outer = get_xp(np)(_linalg.outer) -tensordot = get_xp(np)(_linalg.tensordot) EighResult = _linalg.EighResult QRResult = _linalg.QRResult SlogdetResult = _linalg.SlogdetResult @@ -22,9 +21,7 @@ matrix_rank = get_xp(np)(_linalg.matrix_rank) pinv = get_xp(np)(_linalg.pinv) matrix_norm = get_xp(np)(_linalg.matrix_norm) -matrix_transpose = get_xp(np)(_linalg.matrix_transpose) svdvals = get_xp(np)(_linalg.svdvals) -vecdot = get_xp(np)(_linalg.vecdot) vector_norm = get_xp(np)(_linalg.vector_norm) diagonal = get_xp(np)(_linalg.diagonal) trace = get_xp(np)(_linalg.trace) From 04eef189da6014d77a97ad68eba8f751a077230d Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 17 Feb 2023 20:53:28 -0700 Subject: [PATCH 42/90] Add main namespace linalg functions to the torch wrapper --- array_api_compat/common/_aliases.py | 7 ++++++- array_api_compat/torch/_aliases.py | 13 +++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/array_api_compat/common/_aliases.py b/array_api_compat/common/_aliases.py index 20eb6984..68d80838 100644 --- a/array_api_compat/common/_aliases.py +++ b/array_api_compat/common/_aliases.py @@ -436,7 +436,12 @@ def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: if x1_shape[axis] != x2_shape[axis]: raise ValueError("x1 and x2 must have the same size along the given axis") - x1_, x2_ = xp.broadcast_arrays(x1, x2) + if 
hasattr(xp, 'broadcast_tensors'): + _broadcast = xp.broadcast_tensors + else: + _broadcast = xp.broadcast_arrays + + x1_, x2_ = _broadcast(x1, x2) x1_ = xp.moveaxis(x1_, axis, -1) x2_ = xp.moveaxis(x2_, axis, -1) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index bd38fdf7..f16a2d6c 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -3,7 +3,11 @@ from functools import wraps from builtins import all as builtin_all -from ..common._aliases import (UniqueAllResult, UniqueCountsResult, UniqueInverseResult) +from ..common._aliases import (UniqueAllResult, UniqueCountsResult, + UniqueInverseResult, + matrix_transpose as _aliases_matrix_transpose, + vecdot as _aliases_vecdot) +from .._internal import get_xp from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -559,6 +563,10 @@ def unique_inverse(x: array) -> UniqueInverseResult: def unique_values(x: array) -> array: return torch.unique(x) + +matrix_transpose = get_xp(torch)(_aliases_matrix_transpose) +vecdot = get_xp(torch)(_aliases_vecdot) + __all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', @@ -568,4 +576,5 @@ def unique_values(x: array) -> array: 'mean', 'std', 'var', 'concat', 'squeeze', 'flip', 'roll', 'nonzero', 'where', 'arange', 'eye', 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays', 'unique_all', - 'unique_counts', 'unique_inverse', 'unique_values'] + 'unique_counts', 'unique_inverse', 'unique_values', + 'matrix_transpose', 'vecdot'] From 453ecb8c80170ce39e858761c65890c8f762c49c Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 17 Feb 2023 20:57:56 -0700 Subject: [PATCH 43/90] Add torch wrapper for matmul --- array_api_compat/torch/_aliases.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index f16a2d6c..86bc61c8 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -563,6 +563,12 @@ def unique_inverse(x: array) -> UniqueInverseResult: def unique_values(x: array) -> array: return torch.unique(x) +def matmul(x1: array, x2: array, /, **kwargs) -> array: + # torch.matmul doesn't type promote (but differently from _fix_promotion) + dtype = result_type(x1, x2) + x1 = x1.to(dtype) + x2 = x2.to(dtype) + return torch.matmul(x1, x2, **kwargs) matrix_transpose = get_xp(torch)(_aliases_matrix_transpose) vecdot = get_xp(torch)(_aliases_vecdot) @@ -577,4 +583,4 @@ def unique_values(x: array) -> array: 'nonzero', 'where', 'arange', 'eye', 'linspace', 'full', 'expand_dims', 'astype', 'broadcast_arrays', 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', - 'matrix_transpose', 'vecdot'] + 'matmul', 'matrix_transpose', 'vecdot'] From 5a3bbbe576e0ea3fb0f4ca6e826bae1ae57fc719 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 20 Feb 2023 14:37:10 -0700 Subject: [PATCH 44/90] Finish torch wrappers for matmul, vecdot, and tensordot --- array_api_compat/torch/_aliases.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 86bc61c8..505146c7 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -11,7 +11,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import List, Optional, Tuple, Union + 
from typing import List, Optional, Tuple, Union
+    from typing import List, Optional, Sequence, Tuple, Union
     from ..common._typing import Device
     from torch import dtype as Dtype
 
@@ -83,14 +83,14 @@ def _f(x1, x2, /, **kwargs):
     """
     return _f
 
-def _fix_promotion(x1, x2):
+def _fix_promotion(x1, x2, only_scalar=True):
     if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes:
         return x1, x2
     # If an argument is 0-D pytorch downcasts the other argument
-    if x1.shape == ():
+    if not only_scalar or x1.shape == ():
         dtype = result_type(x1, x2)
         x2 = x2.to(dtype)
-    if x2.shape == ():
+    if not only_scalar or x2.shape == ():
         dtype = result_type(x1, x2)
         x1 = x1.to(dtype)
     return x1, x2
@@ -565,13 +565,22 @@ def unique_values(x: array) -> array:
 
 def matmul(x1: array, x2: array, /, **kwargs) -> array:
     # torch.matmul doesn't type promote (but differently from _fix_promotion)
-    dtype = result_type(x1, x2)
-    x1 = x1.to(dtype)
-    x2 = x2.to(dtype)
+    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
     return torch.matmul(x1, x2, **kwargs)
 
 matrix_transpose = get_xp(torch)(_aliases_matrix_transpose)
-vecdot = get_xp(torch)(_aliases_vecdot)
+_vecdot = get_xp(torch)(_aliases_vecdot)
+
+def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array:
+    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
+    return _vecdot(x1, x2, axis=axis)
+
+# torch.tensordot uses dims instead of axes
+def tensordot(x1: array, x2: array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, **kwargs) -> array:
+    # Note: torch.tensordot fails with integer dtypes when there is only 1
+    # element in the axis (https://github.com/pytorch/pytorch/issues/84530).
+    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
+    return torch.tensordot(x1, x2, dims=axes, **kwargs)
 
 __all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'add',
            'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or',
           'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal',
@@ -583,4 +592,4 @@ def matmul(x1: array, x2: array, /, **kwargs) -> array:
            'nonzero', 'where', 'arange', 'eye', 'linspace', 'full',
            'expand_dims', 'astype', 'broadcast_arrays', 'unique_all',
            'unique_counts', 'unique_inverse', 'unique_values',
-           'matmul', 'matrix_transpose', 'vecdot']
+           'matmul', 'matrix_transpose', 'vecdot', 'tensordot']

From b8bbdc80fe8ff8a2a9ec2ee60a77e3030e040ef5 Mon Sep 17 00:00:00 2001
From: Aaron Meurer
Date: Mon, 20 Feb 2023 17:11:15 -0700
Subject: [PATCH 45/90] Clean up pytorch-xfails file

---
 pytorch-xfails.txt | 81 ++++++++++++++++++++++++++++++----------------
 1 file changed, 53 insertions(+), 28 deletions(-)

diff --git a/pytorch-xfails.txt b/pytorch-xfails.txt
index 560bce5c..d26bbe23 100644
--- a/pytorch-xfails.txt
+++ b/pytorch-xfails.txt
@@ -1,42 +1,51 @@
+# Note: see array_api_compat/torch/_aliases.py for links to corresponding
+# pytorch issues

+# We cannot wrap the array object

+# Indexing does not support negative step
 array_api_tests/test_array_object.py::test_getitem
 array_api_tests/test_array_object.py::test_setitem
+# Masking doesn't support 0 dimensions in the mask
 array_api_tests/test_array_object.py::test_getitem_masking
+# torch doesn't have uint dtypes other than uint8
 array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint16)]
 array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint32)]
 array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint64)]
 array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint16)]
 array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint32)]
 array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint64)]
+
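For context on the only_scalar=False promotions introduced above: torch treats 0-d tensors like Python scalars during type promotion, so a 0-d operand can silently downcast a result below what the array API requires. A small illustration (behavior observed on torch 1.x; the exact set of affected ops varies by version):

    import torch

    x1 = torch.tensor(2, dtype=torch.int64)   # 0-d operand
    x2 = torch.ones(3, dtype=torch.int16)
    # torch lets the 0-d int64 operand downcast the result to int16; the
    # spec requires int64, so the wrappers promote the inputs first.
    assert (x1 * x2).dtype == torch.int16
    assert (x1 * x2.to(torch.int64)).dtype == torch.int64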
+# Overflow error from large inputs array_api_tests/test_creation_functions.py::test_arange +# pytorch linspace bug (should be fixed in torch 2.0) array_api_tests/test_creation_functions.py::test_linspace -array_api_tests/test_creation_functions.py::test_meshgrid + +# torch doesn't have higher uint dtypes array_api_tests/test_data_type_functions.py::test_iinfo[uint16] array_api_tests/test_data_type_functions.py::test_iinfo[uint32] array_api_tests/test_data_type_functions.py::test_iinfo[uint64] + +# --disable-extension broken with test_has_names.py +# https://github.com/data-apis/array-api-tests/issues/169 array_api_tests/test_has_names.py::test_has_names[linalg-matrix_transpose] array_api_tests/test_has_names.py::test_has_names[linalg-outer] array_api_tests/test_has_names.py::test_has_names[linalg-tensordot] array_api_tests/test_has_names.py::test_has_names[linalg-trace] -array_api_tests/test_has_names.py::test_has_names[linear_algebra-matrix_transpose] -array_api_tests/test_has_names.py::test_has_names[linear_algebra-vecdot] + +# We cannot wrap the tensor object array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__] array_api_tests/test_has_names.py::test_has_names[array_method-to_device] -array_api_tests/test_linalg.py::test_cross -array_api_tests/test_linalg.py::test_eigvalsh -array_api_tests/test_linalg.py::test_matmul -array_api_tests/test_linalg.py::test_matrix_power -array_api_tests/test_linalg.py::test_matrix_transpose -array_api_tests/test_linalg.py::test_outer -array_api_tests/test_linalg.py::test_solve + + +# tensordot doesn't allow integer dtypes in some corner cases array_api_tests/test_linalg.py::test_tensordot -array_api_tests/test_linalg.py::test_trace -array_api_tests/test_linalg.py::test_vecdot -array_api_tests/test_manipulation_functions.py::test_concat + +# We cannot wrap the tensor object array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__and__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[__lshift__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[__or__(x1, x2)] -array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[bitwise_right_shift(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__rshift__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__irshift__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__xor__(x1, x2)] @@ -47,29 +56,41 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_greater[__gt__ array_api_tests/test_operators_and_elementwise_functions.py::test_greater_equal[__ge__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_less[__lt__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_less_equal[__le__(x1, x2)] -array_api_tests/test_operators_and_elementwise_functions.py::test_log1p array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__mul__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__pow__(x1, x2)] -array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] 
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x1, x2)] -array_api_tests/test_searching_functions.py::test_nonzero + + +# Mac-only bug (overflow near float max) +# array_api_tests/test_operators_and_elementwise_functions.py::test_log1p + +# torch doesn't handle shifting by more than the bit size correctly +# https://github.com/pytorch/pytorch/issues/70904 +array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[bitwise_right_shift(x1, x2)] +# Torch bug for remainder in some cases with large values +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)] + +# unique_all cannot be implemented because torch's unique does not support +# returning indices array_api_tests/test_set_functions.py::test_unique_all -array_api_tests/test_set_functions.py::test_unique_counts +# unique_inverse incorrectly counts nan values +# (https://github.com/pytorch/pytorch/issues/94106) array_api_tests/test_set_functions.py::test_unique_inverse -array_api_tests/test_set_functions.py::test_unique_values + +# The test suite incorrectly divides by 0 here +# (https://github.com/data-apis/array-api-tests/issues/170) array_api_tests/test_signatures.py::test_func_signature[floor_divide] array_api_tests/test_signatures.py::test_func_signature[remainder] -array_api_tests/test_signatures.py::test_func_signature[matrix_transpose] -array_api_tests/test_signatures.py::test_func_signature[tensordot] -array_api_tests/test_signatures.py::test_func_signature[vecdot] -array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_transpose] -array_api_tests/test_signatures.py::test_extension_func_signature[linalg.outer] -array_api_tests/test_signatures.py::test_extension_func_signature[linalg.tensordot] -array_api_tests/test_signatures.py::test_extension_func_signature[linalg.trace] + +# We cannot add attributes to the tensor object array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] array_api_tests/test_signatures.py::test_array_method_signature[to_device] + + +# We do not attempt to work around special-case differences (most are on +# tensor methods which we couldn't fix anyway). 
array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +infinity and isfinite(x2_i)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is +infinity) -> +infinity] @@ -145,7 +166,11 @@ array_api_tests/test_special_cases.py::test_empty_arrays[std] array_api_tests/test_special_cases.py::test_empty_arrays[var] array_api_tests/test_special_cases.py::test_nan_propagation[std] array_api_tests/test_special_cases.py::test_nan_propagation[var] -array_api_tests/test_statistical_functions.py::test_prod + +# Float correction is not supported by pytorch (https://github.com/data-apis/array-api-tests/issues/168) array_api_tests/test_statistical_functions.py::test_std -array_api_tests/test_statistical_functions.py::test_sum array_api_tests/test_statistical_functions.py::test_var + +# The test suite is incorrectly checking sums that have loss of significance +# (https://github.com/data-apis/array-api-tests/issues/168) +array_api_tests/test_statistical_functions.py::test_sum From 7d176b91a8b6f9baf917414e53b2b51d9f29eb63 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 20 Feb 2023 18:03:37 -0700 Subject: [PATCH 46/90] Update pytorch-xfails.txt --- pytorch-xfails.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pytorch-xfails.txt b/pytorch-xfails.txt index d26bbe23..b737d5ec 100644 --- a/pytorch-xfails.txt +++ b/pytorch-xfails.txt @@ -162,12 +162,13 @@ array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i < 0) -> -0] + +# Float correction is not supported by pytorch +# (https://github.com/data-apis/array-api-tests/issues/168) array_api_tests/test_special_cases.py::test_empty_arrays[std] array_api_tests/test_special_cases.py::test_empty_arrays[var] array_api_tests/test_special_cases.py::test_nan_propagation[std] array_api_tests/test_special_cases.py::test_nan_propagation[var] - -# Float correction is not supported by pytorch (https://github.com/data-apis/array-api-tests/issues/168) array_api_tests/test_statistical_functions.py::test_std array_api_tests/test_statistical_functions.py::test_var From 1ffcb15e870e03e10cf9be9c2e5718380aa55418 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 20 Feb 2023 18:08:23 -0700 Subject: [PATCH 47/90] Install pytorch in on CI --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b014304b..4ff074c4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -15,7 +15,7 @@ jobs: - name: Install Dependencies run: | python -m pip install --upgrade pip - python -m pip install pytest numpy + python -m pip install pytest numpy torch - name: Run Tests run: | From 0db034da8d3dd2787f08d1599f315d01293ef7d0 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 20 Feb 2023 18:15:06 -0700 Subject: [PATCH 48/90] Make the GitHub Actions workflow reusable so that we can test pytorch --- .github/workflows/array-api-tests-numpy.yml | 9 +++++++++ .github/workflows/array-api-tests-torch.yml | 9 +++++++++ .github/workflows/array-api-tests.yml | 15 ++++++++++----- 
pytorch-xfails.txt => torch-xfails.txt | 0 4 files changed, 28 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/array-api-tests-numpy.yml create mode 100644 .github/workflows/array-api-tests-torch.yml rename pytorch-xfails.txt => torch-xfails.txt (100%) diff --git a/.github/workflows/array-api-tests-numpy.yml b/.github/workflows/array-api-tests-numpy.yml new file mode 100644 index 00000000..e1ea5fd4 --- /dev/null +++ b/.github/workflows/array-api-tests-numpy.yml @@ -0,0 +1,9 @@ +name: Array API Tests (NumPy) + +on: [push, pull_request] + +jobs: + call-workflow-passing-data: + uses: ./github/workflows/array-api-tests.yml + with: + package-name: numpy diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml new file mode 100644 index 00000000..34d89ec9 --- /dev/null +++ b/.github/workflows/array-api-tests-torch.yml @@ -0,0 +1,9 @@ +name: Array API Tests (PyTorch) + +on: [push, pull_request] + +jobs: + call-workflow-passing-data: + uses: ./github/workflows/array-api-tests.yml + with: + package-name: torch diff --git a/.github/workflows/array-api-tests.yml b/.github/workflows/array-api-tests.yml index 79c8b0b7..dd2ea8d2 100644 --- a/.github/workflows/array-api-tests.yml +++ b/.github/workflows/array-api-tests.yml @@ -1,6 +1,11 @@ name: Array API Tests -on: [push, pull_request] +on: + workflow_call: + inputs: + package-name: + required: true + type: string env: PYTEST_ARGS: "--max-examples 200 -v -rxXfE --ci" @@ -34,15 +39,15 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install numpy + python -m pip install {{ inputs.package-name }} python -m pip install -r ${GITHUB_WORKSPACE}/array-api-tests/requirements.txt - - name: Run the array API testsuite (NumPy) + - name: Run the array API testsuite ({{ inputs.package-name }}) env: - ARRAY_API_TESTS_MODULE: array_api_compat.numpy + ARRAY_API_TESTS_MODULE: array_api_compat.{{ inputs.package-name }} # This enables the NEP 50 type promotion behavior (without it a lot of # tests fail on bad scalar type promotion behavior) NPY_PROMOTION_STATE: weak run: | export PYTHONPATH="${GITHUB_WORKSPACE}/array-api-compat" cd ${GITHUB_WORKSPACE}/array-api-tests - pytest ${PYTEST_ARGS} --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/numpy-xfails.txt array_api_tests/ + pytest ${PYTEST_ARGS} --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/{{ inputs.package-name }}-xfails.txt array_api_tests/ diff --git a/pytorch-xfails.txt b/torch-xfails.txt similarity index 100% rename from pytorch-xfails.txt rename to torch-xfails.txt From 39cbfd42ee1e54be3caa9760e13a8b3109217130 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 20 Feb 2023 18:17:42 -0700 Subject: [PATCH 49/90] Fix workflow path --- .github/workflows/array-api-tests-numpy.yml | 4 ++-- .github/workflows/array-api-tests-torch.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/array-api-tests-numpy.yml b/.github/workflows/array-api-tests-numpy.yml index e1ea5fd4..1ba54a84 100644 --- a/.github/workflows/array-api-tests-numpy.yml +++ b/.github/workflows/array-api-tests-numpy.yml @@ -3,7 +3,7 @@ name: Array API Tests (NumPy) on: [push, pull_request] jobs: - call-workflow-passing-data: - uses: ./github/workflows/array-api-tests.yml + array-api-tests-numpy: + uses: ./.github/workflows/array-api-tests.yml with: package-name: numpy diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index 34d89ec9..babdbb0b 100644 
--- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -3,7 +3,7 @@ name: Array API Tests (PyTorch) on: [push, pull_request] jobs: - call-workflow-passing-data: - uses: ./github/workflows/array-api-tests.yml + array-api-tests-torch: + uses: ./.github/workflows/array-api-tests.yml with: package-name: torch From c3d0d8ee12545abee67e39c8a85e3328a9164a09 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 20 Feb 2023 18:18:56 -0700 Subject: [PATCH 50/90] Fix variable interpolation syntax --- .github/workflows/array-api-tests.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/array-api-tests.yml b/.github/workflows/array-api-tests.yml index dd2ea8d2..7f3f8305 100644 --- a/.github/workflows/array-api-tests.yml +++ b/.github/workflows/array-api-tests.yml @@ -39,15 +39,15 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install {{ inputs.package-name }} + python -m pip install ${{ inputs.package-name }} python -m pip install -r ${GITHUB_WORKSPACE}/array-api-tests/requirements.txt - - name: Run the array API testsuite ({{ inputs.package-name }}) + - name: Run the array API testsuite (${{ inputs.package-name }}) env: - ARRAY_API_TESTS_MODULE: array_api_compat.{{ inputs.package-name }} + ARRAY_API_TESTS_MODULE: array_api_compat.${{ inputs.package-name }} # This enables the NEP 50 type promotion behavior (without it a lot of # tests fail on bad scalar type promotion behavior) NPY_PROMOTION_STATE: weak run: | export PYTHONPATH="${GITHUB_WORKSPACE}/array-api-compat" cd ${GITHUB_WORKSPACE}/array-api-tests - pytest ${PYTEST_ARGS} --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/{{ inputs.package-name }}-xfails.txt array_api_tests/ + pytest ${PYTEST_ARGS} --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/${{ inputs.package-name }}-xfails.txt array_api_tests/ From cf21cea4e5eeddc837372d0c6354bcce4aa8fc67 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 15:35:10 -0700 Subject: [PATCH 51/90] Allow specifying extra pytest args in the test yamls --- .github/workflows/array-api-tests.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/array-api-tests.yml b/.github/workflows/array-api-tests.yml index 7f3f8305..fd610282 100644 --- a/.github/workflows/array-api-tests.yml +++ b/.github/workflows/array-api-tests.yml @@ -6,9 +6,13 @@ on: package-name: required: true type: string + pytest-extra-args: + required: false + type: string + env: - PYTEST_ARGS: "--max-examples 200 -v -rxXfE --ci" + PYTEST_ARGS: "--max-examples 200 -v -rxXfE --ci ${{ inputs.pytest-extra-args }}" jobs: tests: From a95eeb6b3c096d751aab2dca4a25a8d7a74be1f5 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 15:35:20 -0700 Subject: [PATCH 52/90] Enable verbose output for the torch tests --- .github/workflows/array-api-tests-torch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index babdbb0b..274fade7 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,3 +7,4 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch + pytest-extra-args: "-s --hypothesis-verbosity=verbose" From 4737dc0cafca6c9c0518441d8a8a5cd213a9de88 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 16:14:27 -0700 Subject: [PATCH 53/90] Revert "Enable verbose output for 
the torch tests" This reverts commit a95eeb6b3c096d751aab2dca4a25a8d7a74be1f5. --- .github/workflows/array-api-tests-torch.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index 274fade7..babdbb0b 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,4 +7,3 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch - pytest-extra-args: "-s --hypothesis-verbosity=verbose" From ea9c1e22c2fc14bd6db4f1c8a32c57402ecbc94e Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 16:20:56 -0700 Subject: [PATCH 54/90] Skip the torch test that crashes the CI --- .github/workflows/array-api-tests.yml | 2 +- numpy-skips.txt | 0 torch-skips.txt | 3 +++ 3 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 numpy-skips.txt create mode 100644 torch-skips.txt diff --git a/.github/workflows/array-api-tests.yml b/.github/workflows/array-api-tests.yml index fd610282..a1f9c223 100644 --- a/.github/workflows/array-api-tests.yml +++ b/.github/workflows/array-api-tests.yml @@ -54,4 +54,4 @@ jobs: run: | export PYTHONPATH="${GITHUB_WORKSPACE}/array-api-compat" cd ${GITHUB_WORKSPACE}/array-api-tests - pytest ${PYTEST_ARGS} --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/${{ inputs.package-name }}-xfails.txt array_api_tests/ + pytest ${PYTEST_ARGS} --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/${{ inputs.package-name }}-xfails.txt --skips-file ${GITHUB_WORKSPACE}/array-api-compat/${{ inputs.package-name }}-skips.txt array_api_tests/ diff --git a/numpy-skips.txt b/numpy-skips.txt new file mode 100644 index 00000000..e69de29b diff --git a/torch-skips.txt b/torch-skips.txt new file mode 100644 index 00000000..15e63ea8 --- /dev/null +++ b/torch-skips.txt @@ -0,0 +1,3 @@ +# This test causes a core dump on CI, so we have to skip it entirely (it +# should actually pass) +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] From 4904411779c1bd6c0b2f2c032edcc83bff97e797 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 17:31:44 -0700 Subject: [PATCH 55/90] Skip another test that crashes on CI --- torch-skips.txt | 4 ++-- torch-xfails.txt | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/torch-skips.txt b/torch-skips.txt index 15e63ea8..6669190e 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -1,3 +1,3 @@ -# This test causes a core dump on CI, so we have to skip it entirely (it -# should actually pass) +# These tests cause a core dump on CI, so we have to skip them entirely array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] diff --git a/torch-xfails.txt b/torch-xfails.txt index b737d5ec..d5fc68fb 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -58,7 +58,8 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_less[__lt__(x1 array_api_tests/test_operators_and_elementwise_functions.py::test_less_equal[__le__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__mul__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__pow__(x1, x2)] -array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] +# This test is skipped instead of xfailed because it causes core dumps on CI +# 
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x1, x2)] From 039af599a5345e95471b5c903b8a3dbdf0ff3070 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 18:03:59 -0700 Subject: [PATCH 56/90] Disable linalg in the torch CI tests --- .github/workflows/array-api-tests-torch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index babdbb0b..e8caeffa 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,3 +7,4 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch + pytest-extra-args: "--disable-extension linalg" From 367c4b6e6ca829dfb8133dfa89431ad4a8da293e Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 21 Feb 2023 18:16:25 -0700 Subject: [PATCH 57/90] Add missing torch xfails --- torch-xfails.txt | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/torch-xfails.txt b/torch-xfails.txt index d5fc68fb..38e7cf70 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -84,6 +84,7 @@ array_api_tests/test_set_functions.py::test_unique_inverse # (https://github.com/data-apis/array-api-tests/issues/170) array_api_tests/test_signatures.py::test_func_signature[floor_divide] array_api_tests/test_signatures.py::test_func_signature[remainder] +array_api_tests/test_signatures.py::test_array_method_signature[__mod__] # We cannot add attributes to the tensor object array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] @@ -95,9 +96,11 @@ array_api_tests/test_signatures.py::test_array_method_signature[to_device] array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +infinity and isfinite(x2_i)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is +infinity) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is -infinity) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__add__((x1_i is +0 or x1_i == -0) and isfinite(x2_i) and x2_i != 0) -> x2_i] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and (x2_i is +0 or x2_i == -0)) -> x1_i] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i < 0) -> +0] @@ -107,9 +110,12 @@ array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] 
+array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> +0] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] @@ -140,6 +146,9 @@ array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i < 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] From f3ee38cdb1b3be44758ceb500f888ed1dc20e25a Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 13:08:26 -0700 Subject: [PATCH 58/90] Do a verbose CI run for the pytorch array API tests --- .github/workflows/array-api-tests-torch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index e8caeffa..e4320da1 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,4 +7,4 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch - pytest-extra-args: "--disable-extension linalg" + pytest-extra-args: "--disable-extension linalg -s --hypothesis-verbose=verbose" From c631fa3e77a299b5e097345d2aa233b081a47197 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 13:08:46 -0700 Subject: [PATCH 59/90] Revert "Do a verbose CI run for the pytorch array API tests" This reverts commit f3ee38cdb1b3be44758ceb500f888ed1dc20e25a. 
--- .github/workflows/array-api-tests-torch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index e4320da1..e8caeffa 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,4 +7,4 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch - pytest-extra-args: "--disable-extension linalg -s --hypothesis-verbose=verbose" + pytest-extra-args: "--disable-extension linalg" From 33dacf9a50981e3c5caf7f8c6631630d075a2d29 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 13:12:43 -0700 Subject: [PATCH 60/90] Add some missing torch xfails --- torch-xfails.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torch-xfails.txt b/torch-xfails.txt index 38e7cf70..ba3b3445 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -144,6 +144,7 @@ array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_ array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is -infinity) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is NaN and not x2_i == 0) -> NaN] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] @@ -163,6 +164,7 @@ array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i < array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] array_api_tests/test_special_cases.py::test_iop[__iadd__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] From 82b8defd49722d43209357bfaa70c64519a05109 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 13:56:24 -0700 Subject: [PATCH 61/90] Do a verbose output run of the torch array API tests (with the correct flag this time) --- .github/workflows/array-api-tests-torch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index e8caeffa..aee8bed4 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,4 +7,4 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch - pytest-extra-args: "--disable-extension linalg" + 
pytest-extra-args: "--disable-extension linalg -s --hypothesis-verbosity=verbose" From b014c1b460c8dd22693704ef4d18638e66abdf7a Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 13:56:48 -0700 Subject: [PATCH 62/90] Revert "Do a verbose output run of the torch array API tests (with the correct flag this time)" This reverts commit 82b8defd49722d43209357bfaa70c64519a05109. --- .github/workflows/array-api-tests-torch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/array-api-tests-torch.yml b/.github/workflows/array-api-tests-torch.yml index aee8bed4..e8caeffa 100644 --- a/.github/workflows/array-api-tests-torch.yml +++ b/.github/workflows/array-api-tests-torch.yml @@ -7,4 +7,4 @@ jobs: uses: ./.github/workflows/array-api-tests.yml with: package-name: torch - pytest-extra-args: "--disable-extension linalg -s --hypothesis-verbosity=verbose" + pytest-extra-args: "--disable-extension linalg" From 124d6c331102b538c0384b7f2f52b882fdab71cd Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 14:42:17 -0700 Subject: [PATCH 63/90] Add a missing torch xfail --- torch-xfails.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-xfails.txt b/torch-xfails.txt index ba3b3445..f3ac899a 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -154,6 +154,7 @@ array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i < array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i < 0) -> -0] From 847a9e5b12567b4607d007381ff21a4dffba8087 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 15:36:17 -0700 Subject: [PATCH 64/90] Add a missing torch xfail --- torch-xfails.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-xfails.txt b/torch-xfails.txt index f3ac899a..9e0b536c 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -95,6 +95,7 @@ array_api_tests/test_signatures.py::test_array_method_signature[to_device] # tensor methods which we couldn't fix anyway). 
array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +infinity and isfinite(x2_i)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -infinity and isfinite(x2_i)) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is +infinity) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is -infinity) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__add__((x1_i is +0 or x1_i == -0) and isfinite(x2_i) and x2_i != 0) -> x2_i] From 0857b86c278418b65be355962cd8412c0d521e81 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 17:15:45 -0700 Subject: [PATCH 65/90] Skip test_floor_divide, which core dumps on CI --- torch-skips.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-skips.txt b/torch-skips.txt index 6669190e..ecd36b6f 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -1,3 +1,4 @@ # These tests cause a core dump on CI, so we have to skip them entirely array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] From 6354cd963cb6753edb2d1cbba6076c9209f16fae Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 22 Feb 2023 18:20:57 -0700 Subject: [PATCH 66/90] Add a missing torch xfail --- torch-xfails.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-xfails.txt b/torch-xfails.txt index 9e0b536c..576df33c 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -51,6 +51,7 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_ array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__xor__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_equal[__eq__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_not_equal[__ne__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_greater[__gt__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_greater_equal[__ge__(x1, x2)] From 0565dee5fb3a20639f46a27440e09e94741e751d Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 14:30:21 -0700 Subject: [PATCH 67/90] Update the README --- README.md | 219 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 150 insertions(+), 69 deletions(-) diff --git a/README.md b/README.md index 6814d992..0905d8dd 100644 --- a/README.md +++ b/README.md @@ -1,94 +1,114 @@ # Array API compatibility library -This is a small wrapper around NumPy and CuPy that is compatible with the -[Array API standard](https://data-apis.org/array-api/latest/). See also [NEP -47](https://numpy.org/neps/nep-0047-array-api-standard.html). - -Unlike `numpy.array_api`, this is not a strict minimal implementation of the -Array API, but rather just an extension of the main NumPy and CuPy namespaces -with changes needed to be compliant with the Array API. 
- -Library authors using the Array API may wish to test against `numpy.array_api` -to ensure they are not using functionality outside of the standard, but prefer -this implementation for the default when working with NumPy or CuPy arrays. - -See https://numpy.org/doc/stable/reference/array_api.html for a full list of -changes. In particular, unlike `numpy.array_api`, this package does not use a -separate Array object, but rather just uses `numpy.ndarray` directly. +This is a small wrapper around common array libraries that is compatible with +the [Array API standard](https://data-apis.org/array-api/latest/). Currently, +NumPy, CuPy, and PyTorch are supported. If you want support for other array +libraries, or if you encounter any issues, please [open an +issue](https://github.com/data-apis/array-api-compat/issues). Note that some of the functionality in this library is backwards incompatible -with NumPy. +with the corresponding wrapped libraries. The end-goal is to eventually make +each array library itself fully compatible with the array API, but this +requires making backwards incompatible changes in many cases, so this will +take some time. -This library also supports CuPy in addition to NumPy. If you want support for -other array libraries, please [open an -issue](https://github.com/data-apis/array-api-compat/issues). - -Library authors using the Array API may wish to test against `numpy.array_api` -to ensure they are not using functionality outside of the standard, but prefer -this implementation for end users who use NumPy arrays. +Currently all libraries here are implemented against the 2021.12 version of +the standard. Support for the [2022.12 +version](https://data-apis.org/array-api/2022.12/changelog.html), which adds +complex number support as well as several additional functions, will be added +later this year. ## Usage -To use this library replace +The typical usage of this library will be to get the corresponding array API +compliant namespace from the input arrays using `get_namespace()`, like ```py -import numpy as np +def your_function(x, y): + xp = array_api_compat.get_namespace(x, y) + # Now use xp as the array library namespace + return xp.mean(x, axis=0) + 2*xp.std(y, axis=0) ``` -with +If you wish to have library-specific code-paths, you can import the +corresponding wrapped namespace for each library, like ```py import array_api_compat.numpy as np -``` -and replace - -```py -import cupy as cp -``` - -with - -```py import array_api_compat.cupy as cp -``` -Each will include all the functions from the normal NumPy/CuPy namespace, -except that functions that are part of the array API are wrapped so that they -have the correct array API behavior. In each case, the array object used will -be the same array object from the wrapped library. +import array_api_compat.torch as torch +``` +Each will include all the functions from the normal NumPy/CuPy/Torch +namespace, except that functions that are part of the array API are wrapped so +that they have the correct array API behavior. In each case, the array object +used will be the same array object from the wrapped library. + +## Difference between `array_api_compat` and `numpy.array_api` + +`numpy.array_api` is a strict minimal implementation of the Array API (see +[NEP 47](https://numpy.org/neps/nep-0047-array-api-standard.html)). 
For +example, `numpy.array_api` does not include any functions that are not part of +the array API specification, and will explicitly disallow behaviors that are +not required by the spec (e.g., [cross-kind type +promotions](https://data-apis.org/array-api/latest/API_specification/type_promotion.html)). +(`cupy.array_api` is similar to `numpy.array_api`.) + +`array_api_compat`, on the other hand, is just an extension of the +corresponding array library namespaces with changes needed to be compliant +with the array API. It includes all additional library functions not mentioned +in the spec, and allows any library behaviors not explicitly disallowed by it, +such as cross-kind casting. + +In particular, unlike `numpy.array_api`, this package does not use a separate +`Array` object, but rather just uses the corresponding array library array +objects (`numpy.ndarray`, `cupy.ndarray`, `torch.Tensor`, etc.) directly. This +is because those are the objects that are going to be passed as inputs to +functions by end users. This does mean that a few behaviors cannot be wrapped +(see below), but most of the array API consists of functions, so this does not +affect most things. + +Array-consuming library authors coding against the array API may wish to test +against `numpy.array_api` to ensure they are not using functionality outside +of the standard, but prefer this implementation for the default behavior for +end-users. ## Helper Functions -In addition to the default NumPy/CuPy namespace and functions in the array API -specification, there are several helper functions -included that aren't part of the specification but which are useful for using -the array API: +In addition to the wrapped library namespaces and functions in the array API +specification, there are several helper functions included here that aren't +part of the specification but which are useful for using the array API: - `is_array_api_obj(x)`: Return `True` if `x` is an array API compatible array object. - `get_namespace(*xs)`: Get the corresponding array API namespace for the - arrays `xs`. If the arrays are NumPy or CuPy arrays, the returned namespace - will be `array_api_compat.numpy` or `array_api_compat.cupy` so that it is - array API compatible. + arrays `xs`. For example, if the arrays are NumPy arrays, the returned + namespace will be `array_api_compat.numpy`. Note that this function will + also work for namespaces that aren't supported by this compat library but + which do support the array API (i.e., arrays that have the + `__array_namespace__` attribute). - `device(x)`: Equivalent to [`x.device`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.array_object.array.device.html) in the array API specification. Included because `numpy.ndarray` does not include the `device` attribute and this library does not wrap or extend the - array object. Note that for NumPy, `device` is always `"cpu"`. + array object. Note that for NumPy, `device(x)` is always `"cpu"`. - `to_device(x, device, /, *, stream=None)`: Equivalent to [`x.to_device`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.array_object.array.to_device.html). - Included because neither NumPy's nor CuPy's ndarray objects include this - method. For NumPy, this function effectively does nothing since the only - supported device is the CPU, but for CuPy, this method supports CuPy CUDA + Included because neither NumPy's, CuPy's, nor PyTorch's array objects + include this method.
For NumPy, this function effectively does nothing since + the only supported device is the CPU, but for CuPy, this method supports + CuPy CUDA [Device](https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Device.html) and [Stream](https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html) - objects. + objects. For PyTorch, this is the same as + [`x.to(device)`](https://pytorch.org/docs/stable/generated/torch.Tensor.to.html) + (the `stream` argument is not supported in PyTorch). - `size(x)`: Equivalent to [`x.size`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html#array_api.array.size), @@ -102,6 +122,8 @@ the array API: There are some known differences between this library and the array API specification: +### NumPy and CuPy + - The array methods `__array_namespace__`, `device` (for NumPy), `to_device`, and `mT` are not defined. This reuses `np.ndarray` and `cp.ndarray` and we don't want to monkeypatch or wrap it. The helper functions `device()` and @@ -109,16 +131,62 @@ specification: `x.mT` can be replaced with `xp.linalg.matrix_transpose(x)`. `get_namespace(x)` should be used instead of `x.__array_namespace__`. -- NumPy value-based casting for scalars will be in effect unless explicitly - disabled with the environment variable NPY_PROMOTION_STATE=weak or - np._set_promotion_state('weak') (requires NumPy 1.24 or newer, see NEP 50 - and https://github.com/numpy/numpy/issues/22341) +- Value-based casting for scalars will be in effect unless explicitly disabled + with the environment variable `NPY_PROMOTION_STATE=weak` or + `np._set_promotion_state('weak')` (requires NumPy 1.24 or newer, see [NEP + 50](https://numpy.org/neps/nep-0050-scalar-promotion.html) and + https://github.com/numpy/numpy/issues/22341) - Functions which are not wrapped may not have the same type annotations as the spec. - Functions which are not wrapped may not use positional-only arguments. +### PyTorch + +- Like NumPy/CuPy, we do not wrap the `torch.Tensor` object. It is missing the + `__array_namespace__` and `to_device` methods, so the corresponding helper + functions `get_namespace()` and `to_device()` in this library should be + used instead (see above). + +- The `x.size` attribute on `torch.Tensor` is a function that behaves + differently from + [`x.size`](https://data-apis.org/array-api/draft/API_specification/generated/array_api.array.size.html) + in the spec. Use the `size(x)` helper function as a portable workaround (see + above). + +- The `linalg` extension is not yet implemented. + +- PyTorch does not have unsigned integer types other than `uint8`, and no + attempt is made to implement them here. + +- PyTorch has type promotion semantics that differ from the array API + specification for 0-D tensor objects. The array functions in this wrapper + library do work around this, but the operators on the Tensor object do not, + as no operators or methods on the Tensor object are modified. If this is a + concern, use the functional form instead of the operator form, e.g., `add(x, + y)` instead of `x + y`. + +- [`unique_all()`](https://data-apis.org/array-api/late + st/API_specification/generated/array_api.unique_all.html#array_api.unique_all) + is not implemented, due to the fact that `torch.unique` does not support + returning the `indices` array. The other + [`unique_*`](https://data-apis.org/array-api/latest/API_specification/set_functions.html) + functions are implemented. + +- Slices do not support negative steps. 
+ +- [`std()`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.std.html#array_api.std) + and + [`var()`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.var.html#array_api.var) + do not support floating-point `correction`. + +- The `stream` argument of the `to_device()` helper (see above) is not + supported. + +- As with NumPy, type annotations and positional-only arguments may not + exactly match the spec for functions that are not wrapped at all. + ## Vendoring This library supports vendoring as an installation method. To vendor the @@ -140,11 +208,15 @@ As noted before, the goal of this library is to reuse the NumPy and CuPy array objects, rather than wrapping or extending them. This means that the functions need to accept and return `np.ndarray` for NumPy and `cp.ndarray` for CuPy. -Each namespace (`array_api_compat.numpy` and `array_api_compat.cupy`) is -populated with the normal library namespace (like `from numpy import *`). Then -specific functions are replaced with wrapped variants. Wrapped functions that -have the same logic between NumPy and CuPy (which is most functions) are in -`array_api_compat/common/`. These functions are defined like +Each namespace (`array_api_compat.numpy`, `array_api_compat.cupy`, and +`array_api_compat.torch`) is populated with the normal library namespace (like +`from numpy import *`). Then specific functions are replaced with wrapped +variants. + +Since NumPy and CuPy are nearly identical in behavior, most wrapping logic can +be shared between them. Wrapped functions that have the same logic between +NumPy and CuPy are in `array_api_compat/common/`. +These functions are defined like ```py # In array_api_compat/common/_aliases.py @@ -154,10 +226,10 @@ def acos(x, /, xp): ``` The `xp` argument refers to the original array namespace (either `numpy` or -`cupy`). Then in the specific `array_api_compat/numpy` and -`array_api_compat/cupy` namespace, the `get_xp` decorator is applied to these -functions, which automatically removes the `xp` argument from the function -signature and replaces it with the corresponding array library, like +`cupy`). Then in the specific `array_api_compat/numpy/` and +`array_api_compat/cupy/` namespaces, the `@get_xp` decorator is applied to +these functions, which automatically removes the `xp` argument from the +function signature and replaces it with the corresponding array library, like ```py # In array_api_compat/numpy/_aliases.py @@ -184,6 +256,15 @@ acos = get_xp(cp)(_aliases.acos) ``` Since NumPy and CuPy are nearly identical in their behaviors, this allows -writing the wrapping logic for both libraries only once. If support is added -for other libraries which differ significantly from NumPy, their wrapper code -should go in their specific sub-namespace instead of `common/`. +writing the wrapping logic for both libraries only once. + +PyTorch uses a similar layout in `array_api_compat/torch/`, but it differs +enough from NumPy/CuPy that very few common wrappers for those libraries are +reused. + +See https://numpy.org/doc/stable/reference/array_api.html for a full list of +changes from the base NumPy (the differences for CuPy are nearly identical). A +corresponding document does not yet exist for PyTorch, but you can examine the +various comments in the +[implementation](https://github.com/data-apis/array-api-compat/blob/main/array_api_compat/torch/_aliases.py) +to see what functions and behaviors have been wrapped. 
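To make the helper functions and the functional-form recommendation in the README above concrete, here is a minimal sketch of the intended usage pattern. It is illustrative only: it assumes a CPU-only environment with NumPy and PyTorch installed, that the helpers are importable from the top-level `array_api_compat` namespace (as the README's usage section suggests), and the input values are made up.

```py
import numpy as np
import torch
from array_api_compat import get_namespace, device, size, to_device

def standardize(x):
    # One code path for NumPy arrays, PyTorch tensors, and any array
    # object that defines __array_namespace__.
    xp = get_namespace(x)
    # Functional forms (rather than operators on the array object) avoid
    # the 0-D tensor type promotion caveat noted above for PyTorch.
    return xp.divide(xp.subtract(x, xp.mean(x)), xp.std(x))

x_np = np.asarray([1.0, 2.0, 3.0])
x_pt = torch.asarray([1.0, 2.0, 3.0])
print(standardize(x_np), standardize(x_pt))
print(device(x_np))                   # "cpu" for NumPy
print(size(x_pt))                     # 3; portable replacement for x.size
x_pt = to_device(x_pt, device(x_pt))  # round trip on the same device
```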
From e9b447c52ced97f4f9d203c29056af3b8167d8cc Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 14:32:27 -0700 Subject: [PATCH 68/90] Fix some formatting in the README --- README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0905d8dd..31dbdd5f 100644 --- a/README.md +++ b/README.md @@ -35,9 +35,13 @@ corresponding wrapped namespace for each library, like ```py import array_api_compat.numpy as np +``` +```py import array_api_compat.cupy as cp +``` +```py import array_api_compat.torch as torch ``` @@ -167,8 +171,7 @@ specification: concern, use the functional form instead of the operator form, e.g., `add(x, y)` instead of `x + y`. -- [`unique_all()`](https://data-apis.org/array-api/late - st/API_specification/generated/array_api.unique_all.html#array_api.unique_all) +- [`unique_all()`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.unique_all.html#array_api.unique_all) is not implemented, due to the fact that `torch.unique` does not support returning the `indices` array. The other [`unique_*`](https://data-apis.org/array-api/latest/API_specification/set_functions.html) @@ -202,7 +205,7 @@ references the name "array_api_compat"). Alternatively, the library may be installed as dependency on PyPI. -## Implementation +## Implementation Notes As noted before, the goal of this library is to reuse the NumPy and CuPy array objects, rather than wrapping or extending them. This means that the functions From bb4d3af335eedbc922513224c08ca8273cd41cba Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 14:33:16 -0700 Subject: [PATCH 69/90] Typo fix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 31dbdd5f..90eee1d2 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ import array_api_compat.cupy as cp import array_api_compat.torch as torch ``` -Each will include all the functions from the normal NumPy/CuPy/Torch +Each will include all the functions from the normal NumPy/CuPy/PyTorch namespace, except that functions that are part of the array API are wrapped so that they have the correct array API behavior. In each case, the array object used will be the same array object from the wrapped library. From 5545635faa2a1b05e2f2eb9c60d4c3f214b0fd89 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 14:50:42 -0700 Subject: [PATCH 70/90] Update torch reduction functions that don't support multiple axes Instead of applying them multiple times, we move the dimensions to the end and flatten, and apply them once. --- array_api_compat/torch/_aliases.py | 52 ++++++++++++++---------------- 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/array_api_compat/torch/_aliases.py b/array_api_compat/torch/_aliases.py index 505146c7..ecd0ba10 100644 --- a/array_api_compat/torch/_aliases.py +++ b/array_api_compat/torch/_aliases.py @@ -201,6 +201,21 @@ def _axis_none_keepdims(x, ndim, keepdims): x = torch.unsqueeze(x, 0) return x +def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs): + # Some reductions don't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
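+    # Strategy: move each reduced dimension to the end, flatten those
+    # dimensions into a single trailing dimension, reduce once over it,
+    # and unsqueeze size-1 dimensions back in when keepdims is requested.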
+ axes = _normalize_axes(axis, x.ndim) + for a in reversed(axes): + x = torch.movedim(x, a, -1) + x = torch.flatten(x, -len(axes)) + + out = f(x, -1, **kwargs) + + if keepdims: + for a in axes: + out = torch.unsqueeze(out, a) + return out + def prod(x: array, /, *, @@ -226,14 +241,7 @@ def prod(x: array, # torch.prod doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). if isinstance(axis, tuple): - axes = _normalize_axes(axis, x.ndim) - for i, a in enumerate(axes): - if keepdims: - x = torch.prod(x, a, dtype=dtype, **kwargs) - x = torch.unsqueeze(x, a) - else: - x = torch.prod(x, a - i, dtype=dtype, **kwargs) - return x + return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) @@ -281,21 +289,15 @@ def any(x: array, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, **kwargs) -> array: - # torch.any doesn't support multiple axes - # (https://github.com/pytorch/pytorch/issues/56586). x = torch.asarray(x) ndim = x.ndim if axis == (): return x.to(torch.bool) + # torch.any doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). if isinstance(axis, tuple): - axes = _normalize_axes(axis, x.ndim) - for i, a in enumerate(axes): - if keepdims: - x = torch.any(x, a, **kwargs) - x = torch.unsqueeze(x, a) - else: - x = torch.any(x, a - i, **kwargs) - return x.to(torch.bool) + res = _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) @@ -312,21 +314,15 @@ def all(x: array, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, **kwargs) -> array: - # torch.all doesn't support multiple axes - # (https://github.com/pytorch/pytorch/issues/56586). x = torch.asarray(x) ndim = x.ndim if axis == (): return x.to(torch.bool) + # torch.all doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
if isinstance(axis, tuple): - axes = _normalize_axes(axis, ndim) - for i, a in enumerate(axes): - if keepdims: - x = torch.all(x, a, **kwargs) - x = torch.unsqueeze(x, a) - else: - x = torch.all(x, a - i, **kwargs) - return x.to(torch.bool) + res = _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) From a78f733adc3995ca07dddbf77628e584676fe2dc Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 14:52:13 -0700 Subject: [PATCH 71/90] Add a test skip that crashes on CI --- torch-skips.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-skips.txt b/torch-skips.txt index ecd36b6f..e6d64604 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -1,4 +1,5 @@ # These tests cause a core dump on CI, so we have to skip them entirely array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] From 0cefa7b007010275cab90077ab57946823c57d4c Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 15:01:55 -0700 Subject: [PATCH 72/90] Add a CHANGELOG for the upcoming 1.1 release --- CHANGELOG.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..9585f676 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,22 @@ +# 1.1 (2023-02-23) + +## Major Changes + +- Added support for PyTorch. + +- Add helper function `size()` (required if torch is used as + `torch.Tensor.size` is a method that is incompatible with the array API + [`.size`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html#array_api.array.size)). + +- All wrapper functions that wrap existing library functions now pass through + arbitrary `**kwargs`. + +## Minor Changes + +- Added CI to run against the [array API testsuite](https://github.com/data-apis/array-api-tests). + +# 1.0 (2022-12-05) + +## Major Changes + +- Initial release. Includes support for NumPy and CuPy. 
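For reference, the move-and-flatten approach introduced by the torch reduction update (PATCH 70) above can be sketched in isolation. This is a minimal illustration only, not the library helper itself: the real `_reduce_multiple_axes` also forwards `dtype` and other keyword arguments and restores the reduced dimensions when `keepdims` is requested, and the example values below are made up.

```py
import torch

def reduce_over_axes(f, x, axes):
    # Single-dim reducers like torch.prod cannot take a tuple of dims
    # (https://github.com/pytorch/pytorch/issues/56586). Normalize the
    # axes, move each reduced dimension to the end (largest index first,
    # so the positions of the remaining axes stay valid), flatten the
    # trailing block into one dimension, and reduce once over it.
    axes = sorted(a % x.ndim for a in axes)
    for a in reversed(axes):
        x = torch.movedim(x, a, -1)
    x = torch.flatten(x, -len(axes))
    return f(x, -1)

x = torch.arange(24.0).reshape(2, 3, 4)
print(reduce_over_axes(torch.prod, x, (0, 2)).shape)  # torch.Size([3])
```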
From 1aceff51a270677a0ce4127701f4c92e73100a3f Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 15:03:28 -0700 Subject: [PATCH 73/90] Add more skips for tests that crash on CI --- torch-skips.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torch-skips.txt b/torch-skips.txt index e6d64604..493205a6 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -1,5 +1,7 @@ # These tests cause a core dump on CI, so we have to skip them entirely +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] From b016d4c90b8b45fac6c68fb3147ad202ea3a8bf4 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 15:53:09 -0700 Subject: [PATCH 74/90] Skip a torch test that crashes CI --- torch-skips.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-skips.txt b/torch-skips.txt index 493205a6..eac3f5ee 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -5,3 +5,4 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mo array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)] From 1cd43f20472bcb4e2f199306b9916d366c5607f3 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 16:27:29 -0700 Subject: [PATCH 75/90] Add a torch xfail --- torch-xfails.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/torch-xfails.txt b/torch-xfails.txt index 576df33c..f67ca189 100644 --- a/torch-xfails.txt +++ b/torch-xfails.txt @@ -41,6 +41,9 @@ array_api_tests/test_has_names.py::test_has_names[array_method-to_device] # tensordot doesn't allow integer dtypes in some corner cases array_api_tests/test_linalg.py::test_tensordot +# A numerical difference in stacking (will be fixed by +# https://github.com/data-apis/array-api-tests/pull/101) +array_api_tests/test_linalg.py::test_matmul # We cannot wrap the tensor object array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__and__(x1, x2)] From c8a5a70e4f68c5d5cb0491498567c74151c35d90 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 16:52:28 -0700 Subject: [PATCH 76/90] Add a script to manually run the cupy tests --- test_cupy.sh | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100755 test_cupy.sh diff --git a/test_cupy.sh b/test_cupy.sh new file mode 100755 index 00000000..89a1da8c --- /dev/null +++ b/test_cupy.sh @@ -0,0 +1,26 @@ +# We cannot test cupy on CI so this script will test it manually. 
Assumes it +# is being run in an environment that has cupy and the array-api-tests +# dependencies installed +set -x +set -e + +tmpdir=$(mktemp -d) +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +export PYTHONPATH=$SCRIPT_DIR + +PYTEST_ARGS="--max-examples 200 -v -rxXfE --ci" + +cd $tmpdir +git clone https://github.com/data-apis/array-api-tests +cd array-api-tests + +# Remove this once https://github.com/data-apis/array-api-tests/pull/157 is +# merged +git remote add asmeurer https://github.com/asmeurer/array-api-tests +git fetch asmeurer +git checkout asmeurer/xfails-file + +git submodule update --init + +export ARRAY_API_TESTS_MODULE=array_api_compat.cupy +pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/numpy-xfails.txt From 7261dae61bf18aaab835d3dbf49016a17e174e13 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 23 Feb 2023 16:53:57 -0700 Subject: [PATCH 77/90] Add a torch skip --- torch-skips.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-skips.txt b/torch-skips.txt index eac3f5ee..54f311a3 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -6,3 +6,4 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__im array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x1, x2)] From 0368c8fb3e05bbb95648768ab6c1ec8339124403 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 15:09:12 -0700 Subject: [PATCH 78/90] Use cupy specific skips and xfails --- cupy-skips.txt | 2 + cupy-xfails.txt | 151 ++++++++++++++++++++++++++++++++++++++++++++++++ test_cupy.sh | 2 +- 3 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 cupy-skips.txt create mode 100644 cupy-xfails.txt diff --git a/cupy-skips.txt b/cupy-skips.txt new file mode 100644 index 00000000..fed737c3 --- /dev/null +++ b/cupy-skips.txt @@ -0,0 +1,2 @@ +# Hangs +array_api_tests/test_linalg.py::test_qr diff --git a/cupy-xfails.txt b/cupy-xfails.txt new file mode 100644 index 00000000..a34bdb1e --- /dev/null +++ b/cupy-xfails.txt @@ -0,0 +1,151 @@ +# cupy doesn't have __index__ (and we cannot wrap the ndarray object) +array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint8)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint16)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint32)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint64)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(int8)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(int16)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(int32)] +array_api_tests/test_array_object.py::test_scalar_casting[__index__(int64)] +# copy=False is not yet implemented +array_api_tests/test_creation_functions.py::test_asarray_arrays +# finfo test is testing that the result is a float instead of float32 (see +# also https://github.com/data-apis/array-api/issues/405) +array_api_tests/test_data_type_functions.py::test_finfo[float32] + +# Some array attributes are missing, and we do not wrap the array object +array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__] 
+array_api_tests/test_has_names.py::test_has_names[array_method-__index__] +array_api_tests/test_has_names.py::test_has_names[array_method-to_device] +array_api_tests/test_has_names.py::test_has_names[array_attribute-mT] + +# Some linalg tests depend on .mT instead of matrix_transpose() +# and some require https://github.com/data-apis/array-api-tests/pull/101 to +array_api_tests/test_linalg.py::test_eigvalsh +array_api_tests/test_linalg.py::test_matrix_norm +array_api_tests/test_linalg.py::test_solve +array_api_tests/test_linalg.py::test_svd +array_api_tests/test_linalg.py::test_svdvals +# cupy uses 2023.12 trace() behavior https://github.com/data-apis/array-api/pull/502 +array_api_tests/test_linalg.py::test_trace +# We cannot modify array methods +array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x, s)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] +# +# array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)] +array_api_tests/test_searching_functions.py::test_argmax +array_api_tests/test_signatures.py::test_func_signature[meshgrid] +array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] +array_api_tests/test_signatures.py::test_array_method_signature[__index__] +array_api_tests/test_signatures.py::test_array_method_signature[to_device] +array_api_tests/test_sorting_functions.py::test_argsort +array_api_tests/test_sorting_functions.py::test_sort +array_api_tests/test_special_cases.py::test_unary[abs(x_i is -0) -> +0] +array_api_tests/test_special_cases.py::test_unary[__abs__(x_i is -0) -> +0] +array_api_tests/test_special_cases.py::test_unary[asin(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[asinh(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[atan(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[atanh(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[ceil(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[cos(x_i is -0) -> 1] +array_api_tests/test_special_cases.py::test_unary[cosh(x_i is -0) -> 1] +array_api_tests/test_special_cases.py::test_unary[exp(x_i is -0) -> 1] +array_api_tests/test_special_cases.py::test_unary[expm1(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[floor(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[log1p(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[round(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[sin(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[sinh(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[sqrt(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[tan(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[tanh(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_unary[trunc(x_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_binary[add(x1_i is -0 and x2_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_binary[add(x1_i is -0 and x2_i is +0) -> +0] +array_api_tests/test_special_cases.py::test_binary[add(x1_i is +0 and x2_i is -0) -> +0] +array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] +array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -0 and 
x2_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -0 and x2_i is +0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +0 and x2_i is -0) -> +0] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is -0) -> roughly +pi/2] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +0 and x2_i is -0) -> roughly +pi] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i is +0) -> -0] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i is -0) -> roughly -pi] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i < 0) -> roughly -pi] +array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and x2_i is -0) -> roughly -pi/2] +array_api_tests/test_special_cases.py::test_binary[divide(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[divide(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[divide(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[divide(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[pow(x2_i is -0) -> 1] +array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] +array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] +array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x2_i is -0) -> 1] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 
1) -> -infinity] +array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i > 0 and x2_i is -0) -> NaN] +array_api_tests/test_special_cases.py::test_binary[remainder(x1_i < 0 and x2_i is -0) -> NaN] +array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i > 0 and x2_i is -0) -> NaN] +array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i < 0 and x2_i is -0) -> NaN] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is -0 and x2_i is -0) -> -0] +array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is -0 and x2_i is +0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is +0 and x2_i is -0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__iadd__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] +array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -0 and x2_i > 0) -> -0] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -0 and x2_i < 0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i > 0 and x2_i is -0) -> -infinity] +array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i < 0 and x2_i is -0) -> +infinity] +array_api_tests/test_special_cases.py::test_iop[__ipow__(x2_i is -0) -> 1] 
+array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] +array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] +array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] +array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i > 0) -> +0] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i < 0) -> -0] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i > 0 and x2_i is -0) -> NaN] +array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i < 0 and x2_i is -0) -> NaN] +array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] +array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] +array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] +array_api_tests/test_statistical_functions.py::test_min diff --git a/test_cupy.sh b/test_cupy.sh index 89a1da8c..6109ec83 100755 --- a/test_cupy.sh +++ b/test_cupy.sh @@ -23,4 +23,4 @@ git checkout asmeurer/xfails-file git submodule update --init export ARRAY_API_TESTS_MODULE=array_api_compat.cupy -pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/numpy-xfails.txt +pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file $SCRIPT_DIR/cupy-skips.txt From 2b456c25489544b34aa7b2c1187d23dee1e0a431 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 15:09:46 -0700 Subject: [PATCH 79/90] Allow passing pytest args through in test_cupy.sh --- test_cupy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_cupy.sh b/test_cupy.sh index 6109ec83..169a2be7 100755 --- a/test_cupy.sh +++ b/test_cupy.sh @@ -23,4 +23,4 @@ git checkout asmeurer/xfails-file git submodule update --init export ARRAY_API_TESTS_MODULE=array_api_compat.cupy -pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file $SCRIPT_DIR/cupy-skips.txt +pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file $SCRIPT_DIR/cupy-skips.txt $@ From 046ffd0164c099f3552e9ec2e096a811aecbe4cb Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 15:10:22 -0700 Subject: [PATCH 80/90] Add a shebang to test_cupy.sh --- test_cupy.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test_cupy.sh b/test_cupy.sh index 169a2be7..8839352b 100755 --- a/test_cupy.sh +++ b/test_cupy.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # We cannot test cupy on CI so this script will test it manually. 
Assumes it # is being run in an environment that has cupy and the array-api-tests # dependencies installed From c3eb0d5739b8525d3036f7244f709b5b24ace993 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 15:16:45 -0700 Subject: [PATCH 81/90] Make the hypothesis examples database persistent in test_cupy.sh --- test_cupy.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test_cupy.sh b/test_cupy.sh index 8839352b..e28720c6 100755 --- a/test_cupy.sh +++ b/test_cupy.sh @@ -23,5 +23,10 @@ git checkout asmeurer/xfails-file git submodule update --init +# store the hypothesis examples database in this directory, so that failures +# will be remembered across runs +mkdir -p $SCRIPT_DIR/.hypothesis +ln -s $SCRIPT_DIR/.hypothesis .hypothesis + export ARRAY_API_TESTS_MODULE=array_api_compat.cupy pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file $SCRIPT_DIR/cupy-skips.txt $@ From 111a12264a140717af2c016dae5864e2dff4f59a Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 16:28:04 -0700 Subject: [PATCH 82/90] Fix sort() and argsort() with cupy --- array_api_compat/common/_aliases.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/array_api_compat/common/_aliases.py b/array_api_compat/common/_aliases.py index 68d80838..72bb3f2d 100644 --- a/array_api_compat/common/_aliases.py +++ b/array_api_compat/common/_aliases.py @@ -332,17 +332,19 @@ def argsort( **kwargs, ) -> ndarray: # Note: this keyword argument is different, and the default is different. - kind = "stable" if stable else "quicksort" + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. + if stable: + kwargs['kind'] = "stable" if not descending: - res = xp.argsort(x, axis=axis, kind=kind, **kwargs) + res = xp.argsort(x, axis=axis, **kwargs) else: # As NumPy has no native descending sort, we imitate it here. Note that # simply flipping the results of xp.argsort(x, ...) would not # respect the relative order like it would in native descending sorts. res = xp.flip( - xp.argsort(xp.flip(x, axis=axis), axis=axis, kind=kind), + xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs), axis=axis, - **kwargs, ) # Rely on flip()/argsort() to validate axis normalised_axis = axis if axis >= 0 else x.ndim + axis @@ -355,8 +357,11 @@ def sort( **kwargs, ) -> ndarray: # Note: this keyword argument is different, and the default is different. - kind = "stable" if stable else "quicksort" - res = xp.sort(x, axis=axis, kind=kind, **kwargs) + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. 
+    if stable:
+        kwargs['kind'] = "stable"
+    res = xp.sort(x, axis=axis, **kwargs)
     if descending:
         res = xp.flip(res, axis=axis)
     return res

From 09b5a6f49dfd0ca25e2134b4d1866e3024c0f5b6 Mon Sep 17 00:00:00 2001
From: Aaron Meurer
Date: Fri, 24 Feb 2023 16:31:27 -0700
Subject: [PATCH 83/90] Add comments for the rest of the cupy xfails

---
 cupy-xfails.txt | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/cupy-xfails.txt b/cupy-xfails.txt
index a34bdb1e..e90fd076 100644
--- a/cupy-xfails.txt
+++ b/cupy-xfails.txt
@@ -32,15 +32,27 @@ array_api_tests/test_linalg.py::test_trace
 array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x, s)]
 array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)]
 array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)]
-#
-# array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)]
+# floating point inaccuracy
+array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)]
+
+# cupy (arg)min/max wrong with infinities
+# https://github.com/cupy/cupy/issues/7424
 array_api_tests/test_searching_functions.py::test_argmax
+array_api_tests/test_searching_functions.py::test_argmin
+array_api_tests/test_statistical_functions.py::test_min
+array_api_tests/test_statistical_functions.py::test_max
+
+# testsuite incorrectly thinks meshgrid doesn't have indexing argument
+# (https://github.com/data-apis/array-api-tests/issues/171)
 array_api_tests/test_signatures.py::test_func_signature[meshgrid]
+
+# We cannot add array attributes
 array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__]
 array_api_tests/test_signatures.py::test_array_method_signature[__index__]
 array_api_tests/test_signatures.py::test_array_method_signature[to_device]
-array_api_tests/test_sorting_functions.py::test_argsort
-array_api_tests/test_sorting_functions.py::test_sort
+
+# We do not attempt to work around special cases (and the operator method ones)
+
 array_api_tests/test_special_cases.py::test_unary[abs(x_i is -0) -> +0]
 array_api_tests/test_special_cases.py::test_unary[__abs__(x_i is -0) -> +0]
@@ -148,4 +160,3 @@ array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i
 array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i]
 array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i]
 array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i]
-array_api_tests/test_statistical_functions.py::test_min

From 1908a00ff6d55fab1a6f28ff1bccdcb2b5d50b1a Mon Sep 17 00:00:00 2001
From: Aaron Meurer
Date: Fri, 24 Feb 2023 17:24:29 -0700
Subject: [PATCH 84/90] Fix argument quoting in test_cupy.sh

---
 test_cupy.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test_cupy.sh b/test_cupy.sh
index e28720c6..6debba32 100755
--- a/test_cupy.sh
+++ b/test_cupy.sh
@@ -29,4 +29,4 @@ mkdir -p $SCRIPT_DIR/.hypothesis
 ln -s $SCRIPT_DIR/.hypothesis .hypothesis
 
 export ARRAY_API_TESTS_MODULE=array_api_compat.cupy
-pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file $SCRIPT_DIR/cupy-skips.txt $@
+pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file
$SCRIPT_DIR/cupy-skips.txt "$@"

From 5cbd1c05cd3fa08ace419ae33c2c929fa72f767a Mon Sep 17 00:00:00 2001
From: Aaron Meurer
Date: Fri, 24 Feb 2023 17:24:45 -0700
Subject: [PATCH 85/90] Update cupy skips and xfails

---
 cupy-skips.txt  | 1 +
 cupy-xfails.txt | 9 +++++++++
 2 files changed, 10 insertions(+)

diff --git a/cupy-skips.txt b/cupy-skips.txt
index fed737c3..e618603f 100644
--- a/cupy-skips.txt
+++ b/cupy-skips.txt
@@ -1,2 +1,3 @@
 # Hangs
 array_api_tests/test_linalg.py::test_qr
+array_api_tests/test_linalg.py::test_matrix_rank
diff --git a/cupy-xfails.txt b/cupy-xfails.txt
index e90fd076..39d333d1 100644
--- a/cupy-xfails.txt
+++ b/cupy-xfails.txt
@@ -7,8 +7,13 @@ array_api_tests/test_array_object.py::test_scalar_casting[__index__(int8)]
 array_api_tests/test_array_object.py::test_scalar_casting[__index__(int16)]
 array_api_tests/test_array_object.py::test_scalar_casting[__index__(int32)]
 array_api_tests/test_array_object.py::test_scalar_casting[__index__(int64)]
+
+# testsuite bug (https://github.com/data-apis/array-api-tests/issues/172)
+array_api_tests/test_array_object.py::test_getitem
+
 # copy=False is not yet implemented
 array_api_tests/test_creation_functions.py::test_asarray_arrays
+
 # finfo test is testing that the result is a float instead of float32 (see
 # also https://github.com/data-apis/array-api/issues/405)
 array_api_tests/test_data_type_functions.py::test_finfo[float32]
@@ -23,6 +28,7 @@ array_api_tests/test_has_names.py::test_has_names[array_attribute-mT]
 # and some require https://github.com/data-apis/array-api-tests/pull/101 to be merged
 array_api_tests/test_linalg.py::test_eigvalsh
 array_api_tests/test_linalg.py::test_matrix_norm
+array_api_tests/test_linalg.py::test_matrix_power
 array_api_tests/test_linalg.py::test_solve
 array_api_tests/test_linalg.py::test_svd
 array_api_tests/test_linalg.py::test_svdvals
@@ -32,6 +38,8 @@ array_api_tests/test_linalg.py::test_trace
 array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x, s)]
 array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)]
 array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)]
+array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)]
+array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x, s)]
@@ -80,6 +88,7 @@ array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i !=
array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -0 and x2_i is -0) -> -0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -0 and x2_i is +0) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +0 and x2_i is -0) -> +0] +array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is -0) -> roughly +pi/2] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +0 and x2_i is -0) -> roughly +pi] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i > 0) -> -0] From 6a63d5c15ea5fab41a6037e1e89043efd99c5042 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 17:36:38 -0700 Subject: [PATCH 86/90] Update cupy xfails --- cupy-xfails.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/cupy-xfails.txt b/cupy-xfails.txt index 39d333d1..86b24611 100644 --- a/cupy-xfails.txt +++ b/cupy-xfails.txt @@ -36,6 +36,7 @@ array_api_tests/test_linalg.py::test_svdvals array_api_tests/test_linalg.py::test_trace # We cannot modify array methods array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x, s)] +array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] From 175f195417c1a85ebc98c38b16de72cc8a267a03 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 17:50:45 -0700 Subject: [PATCH 87/90] Update test_cupy.sh to run the vendoring tests --- test_cupy.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test_cupy.sh b/test_cupy.sh index 6debba32..d023a860 100755 --- a/test_cupy.sh +++ b/test_cupy.sh @@ -5,6 +5,9 @@ set -x set -e +# Run the vendoring tests in this repo +pytest + tmpdir=$(mktemp -d) SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) export PYTHONPATH=$SCRIPT_DIR From d1c7999a30ed404d9d7b41173ad0baad8cbcfc6c Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 17:53:23 -0700 Subject: [PATCH 88/90] Add a minor CHANGELOG entry --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9585f676..109d5b0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ - Added CI to run against the [array API testsuite](https://github.com/data-apis/array-api-tests). +- Fix `sort(stable=False)` and `argsort(stable=False)` with CuPy. + # 1.0 (2022-12-05) ## Major Changes From 2fb0a0a1b3c96b248a9201a015b160be346c7d65 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 17:53:39 -0700 Subject: [PATCH 89/90] Bump the version to 1.1 --- CHANGELOG.md | 2 +- array_api_compat/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 109d5b0f..b32a2d33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -# 1.1 (2023-02-23) +# 1.1 (2023-02-24) ## Major Changes diff --git a/array_api_compat/__init__.py b/array_api_compat/__init__.py index ca195443..1b054683 100644 --- a/array_api_compat/__init__.py +++ b/array_api_compat/__init__.py @@ -17,6 +17,6 @@ this implementation for the default when working with NumPy arrays. 
""" -__version__ = '1.0' +__version__ = '1.1' from .common import * From 3470b36544d96a93ab92286127b1a177e0b0b588 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 24 Feb 2023 18:02:13 -0700 Subject: [PATCH 90/90] Add a missing torch skip --- torch-skips.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/torch-skips.txt b/torch-skips.txt index 54f311a3..f2d0f202 100644 --- a/torch-skips.txt +++ b/torch-skips.txt @@ -7,3 +7,4 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__im array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x1, x2)] +array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x, s)]