From a09549bc00d84085d54a215bb57835022be798ad Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Thu, 19 Oct 2023 03:46:27 -0500 Subject: [PATCH 01/38] Update dpnp.geomspace and dpnp.logspace functions --- dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi | 113 ------- dpnp/dpnp_algo/dpnp_arraycreation.py | 277 ++++++++++++++++++ dpnp/dpnp_iface_arraycreation.py | 152 +++++----- tests/skipped_tests.tbl | 1 - tests/skipped_tests_gpu.tbl | 2 - tests/test_arraycreation.py | 17 +- tests/test_sycl_queue.py | 12 +- tests/test_usm_type.py | 26 +- .../cupy/creation_tests/test_ranges.py | 8 - 9 files changed, 401 insertions(+), 207 deletions(-) create mode 100644 dpnp/dpnp_algo/dpnp_arraycreation.py diff --git a/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi b/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi index 5ebb8d157a79..7b90ff1285fa 100644 --- a/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi @@ -38,9 +38,6 @@ and the rest of the library __all__ += [ "dpnp_copy", "dpnp_diag", - "dpnp_geomspace", - "dpnp_linspace", - "dpnp_logspace", "dpnp_ptp", "dpnp_trace", "dpnp_vander", @@ -138,116 +135,6 @@ cpdef utils.dpnp_descriptor dpnp_diag(utils.dpnp_descriptor v, int k): return result -cpdef utils.dpnp_descriptor dpnp_geomspace(start, stop, num, endpoint, dtype, axis): - cdef shape_type_c obj_shape = utils._object_to_tuple(num) - cdef utils.dpnp_descriptor result = utils_py.create_output_descriptor_py(obj_shape, dtype, None) - - if endpoint: - steps_count = num - 1 - else: - steps_count = num - - # if there are steps, then fill values - if steps_count > 0: - step = dpnp.power(dpnp.float64(stop) / start, 1.0 / steps_count) - mult = step - for i in range(1, result.size): - result.get_pyobj()[i] = start * mult - mult = mult * step - else: - step = dpnp.nan - - # if result is not empty, then fiil first and last elements - if num > 0: - result.get_pyobj()[0] = start - if endpoint and result.size > 1: - result.get_pyobj()[result.size - 1] = stop - - return result - - -def dpnp_linspace(start, stop, num, dtype=None, device=None, usm_type=None, sycl_queue=None, endpoint=True, retstep=False, axis=0): - usm_type_alloc, sycl_queue_alloc = utils_py.get_usm_allocations([start, stop]) - - # Get sycl_queue. - if sycl_queue is None and device is None: - sycl_queue = sycl_queue_alloc - sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) - - # Get temporary usm_type for getting dtype. - if usm_type is None: - _usm_type = "device" if usm_type_alloc is None else usm_type_alloc - else: - _usm_type = usm_type - - # Get dtype. - if not hasattr(start, "dtype") and not dpnp.isscalar(start): - start = dpnp.asarray(start, usm_type=_usm_type, sycl_queue=sycl_queue_normalized) - if not hasattr(stop, "dtype") and not dpnp.isscalar(stop): - stop = dpnp.asarray(stop, usm_type=_usm_type, sycl_queue=sycl_queue_normalized) - dt = numpy.result_type(start, stop, float(num)) - dt = utils_py.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) - if dtype is None: - dtype = dt - - if dpnp.isscalar(start) and dpnp.isscalar(stop): - # Call linspace() function for scalars. - res = dpnp_container.linspace(start, - stop, - num, - dtype=dt, - usm_type=_usm_type, - sycl_queue=sycl_queue_normalized, - endpoint=endpoint) - else: - num = operator.index(num) - if num < 0: - raise ValueError("Number of points must be non-negative") - - # Get final usm_type and copy arrays if needed with current dtype, usm_type and sycl_queue. 
- # Do not need to copy usm_ndarray by usm_type if it is not explicitly stated. - if usm_type is None: - usm_type = _usm_type - if not hasattr(start, "usm_type"): - _start = dpnp.asarray(start, dtype=dt, usm_type=usm_type, sycl_queue=sycl_queue_normalized) - else: - _start = dpnp.asarray(start, dtype=dt, sycl_queue=sycl_queue_normalized) - if not hasattr(stop, "usm_type"): - _stop = dpnp.asarray(stop, dtype=dt, usm_type=usm_type, sycl_queue=sycl_queue_normalized) - else: - _stop = dpnp.asarray(stop, dtype=dt, sycl_queue=sycl_queue_normalized) - else: - _start = dpnp.asarray(start, dtype=dt, usm_type=usm_type, sycl_queue=sycl_queue_normalized) - _stop = dpnp.asarray(stop, dtype=dt, usm_type=usm_type, sycl_queue=sycl_queue_normalized) - - # FIXME: issue #1304. Mathematical operations with scalar don't follow data type. - _num = dpnp.asarray((num - 1) if endpoint else num, dtype=dt, usm_type=usm_type, sycl_queue=sycl_queue_normalized) - - step = (_stop - _start) / _num - - res = dpnp_container.arange(0, - stop=num, - step=1, - dtype=dt, - usm_type=usm_type, - sycl_queue=sycl_queue_normalized) - - res = res.reshape((-1,) + (1,) * step.ndim) - res = res * step + _start - - if endpoint and num > 1: - res[-1] = dpnp_container.full(step.shape, _stop) - - if numpy.issubdtype(dtype, dpnp.integer): - dpnp.floor(res, out=res) - return res.astype(dtype) - - -cpdef utils.dpnp_descriptor dpnp_logspace(start, stop, num, endpoint, base, dtype, axis): - temp = dpnp.linspace(start, stop, num=num, endpoint=endpoint) - return dpnp.get_dpnp_descriptor(dpnp.astype(dpnp.power(base, temp), dtype)) - - cpdef dpnp_ptp(utils.dpnp_descriptor arr, axis=None): cdef shape_type_c shape_arr = arr.shape cdef shape_type_c output_shape diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py new file mode 100644 index 000000000000..dd75d62c6866 --- /dev/null +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -0,0 +1,277 @@ +import operator + +import numpy + +import dpnp +import dpnp.dpnp_container as dpnp_container +import dpnp.dpnp_utils as utils + +__all__ = [ + "dpnp_geomspace", + "dpnp_linspace", + "dpnp_logspace", +] + + +def _get_sycl_queue(sycl_queue_alloc, sycl_queue, device): + if sycl_queue is None and device is None: + sycl_queue = sycl_queue_alloc + return dpnp.get_normalized_queue_device( + sycl_queue=sycl_queue, device=device + ) + + +def _get_temporary_usm_type(usm_type_alloc, usm_type): + if usm_type is None: + usm_type = "device" if usm_type_alloc is None else usm_type_alloc + return usm_type + + +def _list_to_array(a, usm_type, sycl_queue, scalar=False): + if not hasattr(a, "dtype") and (scalar or not dpnp.isscalar(a)): + return dpnp.asarray(a, usm_type=usm_type, sycl_queue=sycl_queue) + return a + + +def _copy_by_usm_type(a, dtype, usm_type, tmp_usm_type, sycl_queue_normalized): + if usm_type is None: + usm_type = tmp_usm_type + if not hasattr(a, "usm_type"): + res = dpnp.asarray( + a, + dtype=dtype, + usm_type=usm_type, + sycl_queue=sycl_queue_normalized, + ) + else: + res = dpnp.asarray(a, dtype=dtype, sycl_queue=sycl_queue_normalized) + else: + res = dpnp.asarray( + a, dtype=dtype, usm_type=usm_type, sycl_queue=sycl_queue_normalized + ) + return res + + +def dpnp_geomspace( + start, + stop, + num, + dtype=None, + device=None, + usm_type=None, + sycl_queue=None, + endpoint=True, + axis=0, +): + usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations([start, stop]) + + sycl_queue_normalized = _get_sycl_queue( + sycl_queue_alloc, sycl_queue, device + ) + + _usm_type = 
_get_temporary_usm_type(usm_type_alloc, usm_type) + + start = _list_to_array(start, _usm_type, sycl_queue_normalized, scalar=True) + stop = _list_to_array(stop, _usm_type, sycl_queue_normalized, scalar=True) + + dt = numpy.result_type(start, stop, float(num)) + dt = utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) + if dtype is None: + dtype = dt + + if dpnp.any(start == 0) or dpnp.any(stop == 0): + raise ValueError("Geometric sequence cannot include zero") + + out_sign = dpnp.ones( + dpnp.broadcast_arrays(start, stop)[0].shape, + dtype=dt, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, + ) + # Avoid negligible real or imaginary parts in output by rotating to + # positive real, calculating, then undoing rotation + if dpnp.issubdtype(dt, dpnp.complexfloating): + all_imag = (start.real == 0.0) & (stop.real == 0.0) + if dpnp.any(all_imag): + start[all_imag] = start[all_imag].imag + stop[all_imag] = stop[all_imag].imag + out_sign[all_imag] = 1j + + both_negative = (dpnp.sign(start) == -1) & (dpnp.sign(stop) == -1) + if dpnp.any(both_negative): + dpnp.negative(start, out=start, where=both_negative) + dpnp.negative(stop, out=stop, where=both_negative) + dpnp.negative(out_sign, out=out_sign, where=both_negative) + + log_start = dpnp.log10(start) + log_stop = dpnp.log10(stop) + result = dpnp_logspace( + log_start, + log_stop, + num=num, + endpoint=endpoint, + base=10.0, + dtype=dtype, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, + ) + + if num > 0: + result[0] = start + if num > 1 and endpoint: + result[-1] = stop + + result = out_sign * result + + if axis != 0: + result = dpnp.moveaxis(result, 0, axis) + + return result.astype(dtype, copy=False) + + +def dpnp_linspace( + start, + stop, + num, + dtype=None, + device=None, + usm_type=None, + sycl_queue=None, + endpoint=True, + axis=0, +): + usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations([start, stop]) + + sycl_queue_normalized = _get_sycl_queue( + sycl_queue_alloc, sycl_queue, device + ) + + _usm_type = _get_temporary_usm_type(usm_type_alloc, usm_type) + + start = _list_to_array(start, _usm_type, sycl_queue_normalized) + stop = _list_to_array(stop, _usm_type, sycl_queue_normalized) + + dt = numpy.result_type(start, stop, float(num)) + dt = utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) + if dtype is None: + dtype = dt + + if dpnp.isscalar(start) and dpnp.isscalar(stop): + # Call linspace() function for scalars. 
+ res = dpnp_container.linspace( + start, + stop, + num, + dtype=dt, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, + endpoint=endpoint, + ) + else: + num = operator.index(num) + if num < 0: + raise ValueError("Number of points must be non-negative") + + _start = _copy_by_usm_type( + start, dt, usm_type, _usm_type, sycl_queue_normalized + ) + _stop = _copy_by_usm_type( + stop, dt, usm_type, _usm_type, sycl_queue_normalized + ) + + _num = (num - 1) if endpoint else num + + step = (_stop - _start) / _num + + res = dpnp_container.arange( + 0, + stop=num, + step=1, + dtype=dt, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, + ) + + res = res.reshape((-1,) + (1,) * step.ndim) + res = res * step + _start + + if endpoint and num > 1: + res[-1] = dpnp_container.full(step.shape, _stop) + + if axis != 0: + res = dpnp.moveaxis(res, 0, axis) + + if numpy.issubdtype(dtype, dpnp.integer): + dpnp.floor(res, out=res) + + return res.astype(dtype, copy=False) + + +def dpnp_logspace( + start, + stop, + num=50, + device=None, + usm_type=None, + sycl_queue=None, + endpoint=True, + base=10.0, + dtype=None, + axis=0, +): + usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations( + [start, stop, base] + ) + + sycl_queue_normalized = _get_sycl_queue( + sycl_queue_alloc, sycl_queue, device + ) + + _usm_type = _get_temporary_usm_type(usm_type_alloc, usm_type) + + start = _list_to_array(start, _usm_type, sycl_queue_normalized) + stop = _list_to_array(stop, _usm_type, sycl_queue_normalized) + base = _list_to_array(base, _usm_type, sycl_queue_normalized) + + dt = numpy.result_type(start, stop, float(num)) + dt = utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) + if dtype is None: + dtype = dt + + if dpnp.isscalar(start) and dpnp.isscalar(stop): + # Call linspace() function for scalars. + res = dpnp_container.linspace( + start, + stop, + num, + dtype=dt, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, + endpoint=endpoint, + ) + + else: + _start = _copy_by_usm_type( + start, dt, usm_type, _usm_type, sycl_queue_normalized + ) + _stop = _copy_by_usm_type( + stop, dt, usm_type, _usm_type, sycl_queue_normalized + ) + res = dpnp_linspace( + _start, + _stop, + num=num, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, + endpoint=endpoint, + axis=axis, + ) + + _base = _copy_by_usm_type( + base, dt, usm_type, _usm_type, sycl_queue_normalized + ) + + _base = dpnp.expand_dims(_base, axis=axis) + if dtype is None: + return dpnp.power(_base, res) + return dpnp.power(_base, res).astype(dtype, copy=False) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 2a8d80fc3891..172ee5ea794f 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -50,6 +50,12 @@ from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * +from .dpnp_algo.dpnp_arraycreation import ( + dpnp_geomspace, + dpnp_linspace, + dpnp_logspace, +) + __all__ = [ "arange", "array", @@ -1019,16 +1025,24 @@ def full_like( return numpy.full_like(x1, fill_value, dtype, order, subok, shape) -def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): +def geomspace( + start, + stop, + /, + num, + *, + dtype=None, + device=None, + usm_type=None, + sycl_queue=None, + endpoint=True, + axis=0, +): """ Return numbers spaced evenly on a log scale (a geometric progression). For full documentation refer to :obj:`numpy.geomspace`. - Limitations - ----------- - Parameter `axis` is supported only with default value ``0``. 
- See Also -------- :obj:`dpnp.logspace` : Similar to geomspace, but with endpoints specified @@ -1041,24 +1055,25 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Examples -------- >>> import dpnp as np - >>> x = np.geomspace(1, 1000, num=4) - >>> [i for i in x] - [1.0, 10.0, 100.0, 1000.0] + >>> np.geomspace(1, 1000, num=4) + array([ 1., 10., 100., 1000.]) + >>> x2 = np.geomspace(1, 1000, num=4, endpoint=False) - >>> [i for i in x2] - [1.0, 5.62341325, 31.6227766, 177.827941] + array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) """ - if not use_origin_backend(): - if axis != 0: - pass - else: - return dpnp_geomspace( - start, stop, num, endpoint, dtype, axis - ).get_pyobj() - - return call_origin(numpy.geomspace, start, stop, num, endpoint, dtype, axis) + return dpnp_geomspace( + start, + stop, + num, + dtype=dtype, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + endpoint=endpoint, + axis=axis, + ) def identity( @@ -1135,9 +1150,8 @@ def linspace( Limitations ----------- - Parameter `axis` is supported only with default value ``0``. Parameter `retstep` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. + Otherwise ``NotImplementedError`` exception will be raised. See Also -------- @@ -1151,36 +1165,29 @@ def linspace( Examples -------- >>> import dpnp as np - >>> x = np.linspace(2.0, 3.0, num=5) - >>> [i for i in x] - [2.0, 2.25, 2.5, 2.75, 3.0] - >>> x2 = np.linspace(2.0, 3.0, num=5, endpoint=False) - >>> [i for i in x2] - [2.0, 2.2, 2.4, 2.6, 2.8] - >>> x3, step = np.linspace(2.0, 3.0, num=5, retstep=True) - >>> [i for i in x3], step - ([2.0, 2.25, 2.5, 2.75, 3.0], 0.25) + >>> np.linspace(2.0, 3.0, num=5) + array([2. , 2.25, 2.5 , 2.75, 3. ]) + + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([2. , 2.2, 2.4, 2.6, 2.8]) """ if retstep is not False: - pass - elif axis != 0: - pass - else: - return dpnp_linspace( - start, - stop, - num, - dtype=dtype, - device=device, - usm_type=usm_type, - sycl_queue=sycl_queue, - endpoint=endpoint, + raise NotImplementedError( + f"retstep={retstep} is currently not supported" ) - return call_origin( - numpy.linspace, start, stop, num, endpoint, retstep, dtype, axis + return dpnp_linspace( + start, + stop, + num, + dtype=dtype, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + endpoint=endpoint, + axis=axis, ) @@ -1210,16 +1217,25 @@ def loadtxt(fname, **kwargs): return call_origin(numpy.loadtxt, fname, **kwargs) -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): +def logspace( + start, + stop, + /, + num=50, + *, + device=None, + usm_type=None, + sycl_queue=None, + endpoint=True, + base=10.0, + dtype=None, + axis=0, +): """ Return numbers spaced evenly on a log scale. For full documentation refer to :obj:`numpy.logspace`. - Limitations - ----------- - Parameter `axis` is supported only with default value ``0``. 
- See Also -------- :obj:`dpnp.arange` : Similar to linspace, with the step size specified @@ -1234,28 +1250,28 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): Examples -------- >>> import dpnp as np - >>> x = np.logspace(2.0, 3.0, num=4) - >>> [i for i in x] - [100.0, 215.443469, 464.15888336, 1000.0] - >>> x2 = np.logspace(2.0, 3.0, num=4, endpoint=False) - >>> [i for i in x2] - [100.0, 177.827941, 316.22776602, 562.34132519] - >>> x3 = np.logspace(2.0, 3.0, num=4, base=2.0) - >>> [i for i in x3] - [4.0, 5.0396842, 6.34960421, 8.0] + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) - """ + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([100. , 177.827941 , 316.22776602, 562.34132519]) - if not use_origin_backend(): - if axis != 0: - checker_throw_value_error("linspace", "axis", axis, 0) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([4. , 5.0396842 , 6.34960421, 8. ]) - return dpnp_logspace( - start, stop, num, endpoint, base, dtype, axis - ).get_pyobj() + """ - return call_origin( - numpy.logspace, start, stop, num, endpoint, base, dtype, axis + return dpnp_logspace( + start, + stop, + num=num, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + endpoint=endpoint, + base=base, + dtype=dtype, + axis=axis, ) diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 7cfab9a6263a..e2c41ceb7df7 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -221,7 +221,6 @@ tests/third_party/cupy/creation_tests/test_ranges.py::TestMgrid::test_mgrid3 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid3 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid4 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid5 -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_array_start_stop_axis1 tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_one_num_no_endopoint_with_retstep tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_with_retstep tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_zero_num_no_endopoint_with_retstep diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index 7ba437240c42..9bc18c56dd8f 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -343,11 +343,9 @@ tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid4 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid5 tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_arange_negative_size tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_arange_no_dtype_int -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_array_start_stop_axis1 tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_one_num_no_endopoint_with_retstep tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_with_retstep tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_zero_num_no_endopoint_with_retstep -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_logspace_zero_num tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_1_{axes=None, norm=None, s=(1, None), shape=(3, 4)}::test_fft2 tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_7_{axes=(), norm=None, s=None, shape=(3, 4)}::test_fft2 diff --git 
a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 9c7fd6bf0602..652ddd6ee69d 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -201,7 +201,6 @@ def test_fromstring(dtype): assert_array_equal(func(dpnp), func(numpy)) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize("num", [2, 4, 8, 3, 9, 27]) @pytest.mark.parametrize("endpoint", [True, False]) @@ -209,17 +208,14 @@ def test_geomspace(dtype, num, endpoint): start = 2 stop = 256 - func = lambda xp: xp.geomspace(start, stop, num, endpoint, dtype) + func = lambda xp: xp.geomspace( + start, stop, num, endpoint=endpoint, dtype=dtype + ) np_res = func(numpy) dpnp_res = func(dpnp) - # Note that the above may not produce exact integers: - # (c) https://numpy.org/doc/stable/reference/generated/numpy.geomspace.html - if dtype in [numpy.int64, numpy.int32]: - assert_allclose(dpnp_res, np_res, atol=1) - else: - assert_allclose(dpnp_res, np_res) + assert_allclose(dpnp_res, np_res) @pytest.mark.parametrize("n", [0, 1, 4], ids=["0", "1", "4"]) @@ -716,3 +712,8 @@ def test_set_shape(shape): da.shape = shape assert_array_equal(na, da) + + +def test_linspace_retstep_error(): + with pytest.raises(NotImplementedError): + dpnp.linspace(2, 5, 3, retstep=True) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index e7e649de2909..4d7e29c1400f 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -85,8 +85,10 @@ def vvsort(val, vec, size, xp): pytest.param("arange", [-25.7], {"stop": 10**8, "step": 15}), pytest.param("full", [(2, 2)], {"fill_value": 5}), pytest.param("eye", [4, 2], {}), + pytest.param("geomspace", [1, 4, 8], {}), pytest.param("identity", [4], {}), pytest.param("linspace", [0, 4, 8], {}), + pytest.param("logspace", [0, 4, 8], {}), pytest.param("ones", [(2, 2)], {}), pytest.param("tri", [3, 5, 2], {}), pytest.param("zeros", [(2, 2)], {}), @@ -140,12 +142,16 @@ def test_empty_like(device_x, device_y): "func, args, kwargs", [ pytest.param("full_like", ["x0"], {"fill_value": 5}), + pytest.param("geomspace", ["x0[0:3]", "8", "4"], {}), + pytest.param("geomspace", ["1", "x0[3:5]", "4"], {}), + pytest.param("linspace", ["x0[0:2]", "8", "4"], {}), + pytest.param("linspace", ["0", "x0[3:5]", "4"], {}), + pytest.param("logspace", ["x0[0:2]", "8", "4"], {}), + pytest.param("logspace", ["0", "x0[3:5]", "4"], {}), pytest.param("ones_like", ["x0"], {}), - pytest.param("zeros_like", ["x0"], {}), pytest.param("tril", ["x0.reshape((2,2))"], {}), pytest.param("triu", ["x0.reshape((2,2))"], {}), - pytest.param("linspace", ["x0", "4", "4"], {}), - pytest.param("linspace", ["1", "x0", "4"], {}), + pytest.param("zeros_like", ["x0"], {}), ], ) @pytest.mark.parametrize( diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 122f71651581..1192384d5721 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -140,13 +140,17 @@ def test_coerced_usm_types_power(usm_type_x, usm_type_y): @pytest.mark.parametrize( "func, args", [ + pytest.param("empty_like", ["x0"]), pytest.param("full", ["10", "x0[3]"]), pytest.param("full_like", ["x0", "4"]), - pytest.param("zeros_like", ["x0"]), - pytest.param("ones_like", ["x0"]), - pytest.param("empty_like", ["x0"]), - pytest.param("linspace", ["x0[0:2]", "4", "4"]), + pytest.param("geomspace", ["x0[0:3]", "8", "4"]), + pytest.param("geomspace", ["1", "x0[3:5]", "4"]), + pytest.param("linspace", ["x0[0:2]", "8", "4"]), pytest.param("linspace", ["0", "x0[3:5]", 
"4"]), + pytest.param("logspace", ["x0[0:2]", "8", "4"]), + pytest.param("logspace", ["0", "x0[3:5]", "4"]), + pytest.param("ones_like", ["x0"]), + pytest.param("zeros_like", ["x0"]), ], ) @pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) @@ -168,8 +172,10 @@ def test_array_creation_from_an_array(func, args, usm_type_x, usm_type_y): pytest.param("arange", [-25.7], {"stop": 10**8, "step": 15}), pytest.param("full", [(2, 2)], {"fill_value": 5}), pytest.param("eye", [4, 2], {}), + pytest.param("geomspace", [1, 4, 8], {}), pytest.param("identity", [4], {}), pytest.param("linspace", [0, 4, 8], {}), + pytest.param("logspace", [0, 4, 8], {}), pytest.param("ones", [(2, 2)], {}), pytest.param("tri", [3, 5, 2], {}), pytest.param("zeros", [(2, 2)], {}), @@ -189,6 +195,18 @@ def test_array_creation_from_scratch(func, arg, kwargs, usm_type): assert dpnp_array.usm_type == usm_type +@pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) +@pytest.mark.parametrize("usm_type_y", list_of_usm_types, ids=list_of_usm_types) +def test_logspace_base(usm_type_x, usm_type_y): + x0 = dp.full(10, 2, usm_type=usm_type_x) + + x = dp.logspace([2, 2], 8, 4, base=x0[3:5]) + y = dp.logspace([2, 2], 8, 4, base=x0[3:5], usm_type=usm_type_y) + + assert x.usm_type == usm_type_x + assert y.usm_type == usm_type_y + + @pytest.mark.parametrize( "func", [ diff --git a/tests/third_party/cupy/creation_tests/test_ranges.py b/tests/third_party/cupy/creation_tests/test_ranges.py index be2f113a3181..46b1fbe9180b 100644 --- a/tests/third_party/cupy/creation_tests/test_ranges.py +++ b/tests/third_party/cupy/creation_tests/test_ranges.py @@ -214,13 +214,11 @@ def test_linspace_start_stop_list(self, xp, dtype): stop = [100, 16] return xp.linspace(start, stop, num=50, dtype=dtype) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_logspace(self, xp, dtype): return xp.logspace(0, 2, 5, dtype=dtype) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_logspace2(self, xp, dtype): @@ -231,29 +229,24 @@ def test_logspace2(self, xp, dtype): def test_logspace_zero_num(self, xp, dtype): return xp.logspace(0, 2, 0, dtype=dtype) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_logspace_one_num(self, xp, dtype): return xp.logspace(0, 2, 1, dtype=dtype) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_logspace_no_endpoint(self, xp, dtype): return xp.logspace(0, 2, 5, dtype=dtype, endpoint=False) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.numpy_cupy_allclose(rtol=1e-4, type_check=has_support_aspect64()) def test_logspace_no_dtype_int(self, xp): return xp.logspace(0, 2) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.numpy_cupy_allclose(rtol=1e-4, type_check=has_support_aspect64()) def test_logspace_no_dtype_float(self, xp): return xp.logspace(0.0, 2.0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.numpy_cupy_allclose() def test_logspace_float_args_with_int_dtype(self, xp): return xp.logspace(0.1, 2.1, 11, dtype=int) @@ -263,7 +256,6 @@ def test_logspace_neg_num(self): with pytest.raises(ValueError): xp.logspace(0, 10, -1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_bool=True) 
@testing.numpy_cupy_allclose(rtol=1e-04) def test_logspace_base(self, xp, dtype): From 1f559fba8deb1139fd60b1c7e0ad9d0452bbc4a5 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Thu, 19 Oct 2023 12:53:20 -0500 Subject: [PATCH 02/38] Added more test for geomspace and logspace functions --- dpnp/dpnp_algo/dpnp_arraycreation.py | 8 +-- tests/test_arraycreation.py | 90 ++++++++++++++++++++++------ 2 files changed, 77 insertions(+), 21 deletions(-) diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py index dd75d62c6866..6f2ebe1516a5 100644 --- a/dpnp/dpnp_algo/dpnp_arraycreation.py +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -99,9 +99,9 @@ def dpnp_geomspace( both_negative = (dpnp.sign(start) == -1) & (dpnp.sign(stop) == -1) if dpnp.any(both_negative): - dpnp.negative(start, out=start, where=both_negative) - dpnp.negative(stop, out=stop, where=both_negative) - dpnp.negative(out_sign, out=out_sign, where=both_negative) + dpnp.negative(start[both_negative], out=start[both_negative]) + dpnp.negative(stop[both_negative], out=stop[both_negative]) + dpnp.negative(out_sign[both_negative], out=out_sign[both_negative]) log_start = dpnp.log10(start) log_stop = dpnp.log10(stop) @@ -271,7 +271,7 @@ def dpnp_logspace( base, dt, usm_type, _usm_type, sycl_queue_normalized ) - _base = dpnp.expand_dims(_base, axis=axis) + print(res.shape) if dtype is None: return dpnp.power(_base, res) return dpnp.power(_base, res).astype(dtype, copy=False) diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 652ddd6ee69d..e159b8a78853 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -201,23 +201,6 @@ def test_fromstring(dtype): assert_array_equal(func(dpnp), func(numpy)) -@pytest.mark.parametrize("dtype", get_all_dtypes()) -@pytest.mark.parametrize("num", [2, 4, 8, 3, 9, 27]) -@pytest.mark.parametrize("endpoint", [True, False]) -def test_geomspace(dtype, num, endpoint): - start = 2 - stop = 256 - - func = lambda xp: xp.geomspace( - start, stop, num, endpoint=endpoint, dtype=dtype - ) - - np_res = func(numpy) - dpnp_res = func(dpnp) - - assert_allclose(dpnp_res, np_res) - - @pytest.mark.parametrize("n", [0, 1, 4], ids=["0", "1", "4"]) @pytest.mark.parametrize("dtype", get_all_dtypes()) def test_identity(n, dtype): @@ -690,6 +673,12 @@ def test_linspace_complex(): assert_allclose(func(numpy), func(dpnp)) +@pytest.mark.parametrize("axis", [0, 1]) +def test_linspace_axis(axis): + func = lambda xp: xp.linspace([2, 3], [20, 15], num=10, axis=axis) + assert_allclose(func(numpy), func(dpnp)) + + @pytest.mark.parametrize( "arrays", [[], [[1]], [[1, 2, 3], [4, 5, 6]], [[1, 2], [3, 4], [5, 6]]], @@ -717,3 +706,70 @@ def test_set_shape(shape): def test_linspace_retstep_error(): with pytest.raises(NotImplementedError): dpnp.linspace(2, 5, 3, retstep=True) + + +def test_space_num_error(): + with pytest.raises(ValueError): + dpnp.linspace(2, 5, -3) + dpnp.geomspace(2, 5, -3) + dpnp.logspace(2, 5, -3) + + +@pytest.mark.parametrize("sign", [-1, 1]) +@pytest.mark.parametrize("dtype", get_all_dtypes()) +@pytest.mark.parametrize("num", [2, 4, 8, 3, 9, 27]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_geomspace(sign, dtype, num, endpoint): + start = 2 * sign + stop = 256 * sign + + func = lambda xp: xp.geomspace( + start, stop, num, endpoint=endpoint, dtype=dtype + ) + + np_res = func(numpy) + dpnp_res = func(dpnp) + + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +# dpnp.sign raise numpy fall 
back for complex dtype +def test_geomspace_complex(): + func = lambda xp: xp.geomspace(1j, 10j, num=10) + assert_allclose(func(numpy), func(dpnp)) + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_geomspace_axis(axis): + func = lambda xp: xp.geomspace([2, 3], [20, 15], num=10, axis=axis) + assert_allclose(func(numpy), func(dpnp)) + + +@pytest.mark.parametrize("dtype", get_all_dtypes()) +@pytest.mark.parametrize("num", [2, 4, 8, 3, 9, 27]) +@pytest.mark.parametrize("base", [-2, 2]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_logspace(dtype, num, endpoint, base): + start = 2 + stop = 5 + + func = lambda xp: xp.logspace( + start, stop, num, endpoint=endpoint, dtype=dtype, base=base + ) + + np_res = func(numpy) + dpnp_res = func(dpnp) + + if dtype in [numpy.int64, numpy.int32]: + assert_allclose(dpnp_res, np_res, rtol=1) + else: + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_logspace_axis(axis): + func = lambda xp: xp.logspace( + [2, 3], [20, 15], num=2, base=[[1, 3], [5, 7]], axis=axis + ) + assert_allclose(func(numpy), func(dpnp)) From e64576f8c6b18a3c5e6bf0ce1437e51ac4ce5991 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Thu, 19 Oct 2023 15:40:41 -0500 Subject: [PATCH 03/38] Added tests for geomspace zero error --- dpnp/dpnp_algo/dpnp_arraycreation.py | 2 -- tests/test_arraycreation.py | 10 ++++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py index 6f2ebe1516a5..28a8d1e8df5a 100644 --- a/dpnp/dpnp_algo/dpnp_arraycreation.py +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -235,8 +235,6 @@ def dpnp_logspace( dt = numpy.result_type(start, stop, float(num)) dt = utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) - if dtype is None: - dtype = dt if dpnp.isscalar(start) and dpnp.isscalar(stop): # Call linspace() function for scalars. 
diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index e159b8a78853..36d82acc1a6c 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -708,11 +708,21 @@ def test_linspace_retstep_error(): dpnp.linspace(2, 5, 3, retstep=True) +def test_geomspace_zero_error(): + with pytest.raises(ValueError): + dpnp.geomspace(0, 5, 3) + dpnp.geomspace(2, 0, 3) + dpnp.geomspace(0, 0, 3) + + def test_space_num_error(): with pytest.raises(ValueError): dpnp.linspace(2, 5, -3) dpnp.geomspace(2, 5, -3) dpnp.logspace(2, 5, -3) + dpnp.linspace([2, 3], 5, -3) + dpnp.geomspace([2, 3], 5, -3) + dpnp.logspace([2, 3], 5, -3) @pytest.mark.parametrize("sign", [-1, 1]) From 3470248a51b5ef3c8c419e87d32628de6c9b8cd9 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Tue, 31 Oct 2023 03:30:16 -0500 Subject: [PATCH 04/38] address comments --- dpnp/dpnp_algo/dpnp_arraycreation.py | 205 ++++++++---------- dpnp/dpnp_iface_arraycreation.py | 11 +- tests/skipped_tests.tbl | 3 - tests/skipped_tests_gpu.tbl | 3 - tests/test_arraycreation.py | 40 ++-- .../cupy/creation_tests/test_ranges.py | 41 ++++ 6 files changed, 158 insertions(+), 145 deletions(-) diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py index 28a8d1e8df5a..f7c315c42e52 100644 --- a/dpnp/dpnp_algo/dpnp_arraycreation.py +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -13,45 +13,6 @@ ] -def _get_sycl_queue(sycl_queue_alloc, sycl_queue, device): - if sycl_queue is None and device is None: - sycl_queue = sycl_queue_alloc - return dpnp.get_normalized_queue_device( - sycl_queue=sycl_queue, device=device - ) - - -def _get_temporary_usm_type(usm_type_alloc, usm_type): - if usm_type is None: - usm_type = "device" if usm_type_alloc is None else usm_type_alloc - return usm_type - - -def _list_to_array(a, usm_type, sycl_queue, scalar=False): - if not hasattr(a, "dtype") and (scalar or not dpnp.isscalar(a)): - return dpnp.asarray(a, usm_type=usm_type, sycl_queue=sycl_queue) - return a - - -def _copy_by_usm_type(a, dtype, usm_type, tmp_usm_type, sycl_queue_normalized): - if usm_type is None: - usm_type = tmp_usm_type - if not hasattr(a, "usm_type"): - res = dpnp.asarray( - a, - dtype=dtype, - usm_type=usm_type, - sycl_queue=sycl_queue_normalized, - ) - else: - res = dpnp.asarray(a, dtype=dtype, sycl_queue=sycl_queue_normalized) - else: - res = dpnp.asarray( - a, dtype=dtype, usm_type=usm_type, sycl_queue=sycl_queue_normalized - ) - return res - - def dpnp_geomspace( start, stop, @@ -65,14 +26,25 @@ def dpnp_geomspace( ): usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations([start, stop]) - sycl_queue_normalized = _get_sycl_queue( - sycl_queue_alloc, sycl_queue, device + if sycl_queue is None and device is None: + sycl_queue = sycl_queue_alloc + sycl_queue_normalized = dpnp.get_normalized_queue_device( + sycl_queue=sycl_queue, device=device ) - _usm_type = _get_temporary_usm_type(usm_type_alloc, usm_type) + if usm_type is None: + _usm_type = "device" if usm_type_alloc is None else usm_type_alloc + else: + _usm_type = usm_type - start = _list_to_array(start, _usm_type, sycl_queue_normalized, scalar=True) - stop = _list_to_array(stop, _usm_type, sycl_queue_normalized, scalar=True) + if not hasattr(start, "dtype"): + start = dpnp.asarray( + start, usm_type=_usm_type, sycl_queue=sycl_queue_normalized + ) + if not hasattr(stop, "dtype"): + stop = dpnp.asarray( + stop, usm_type=_usm_type, sycl_queue=sycl_queue_normalized + ) dt = numpy.result_type(start, stop, float(num)) dt = 
utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) @@ -138,24 +110,46 @@ def dpnp_linspace( usm_type=None, sycl_queue=None, endpoint=True, + retstep=False, axis=0, ): usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations([start, stop]) - sycl_queue_normalized = _get_sycl_queue( - sycl_queue_alloc, sycl_queue, device + if sycl_queue is None and device is None: + sycl_queue = sycl_queue_alloc + sycl_queue_normalized = dpnp.get_normalized_queue_device( + sycl_queue=sycl_queue, device=device ) - _usm_type = _get_temporary_usm_type(usm_type_alloc, usm_type) + if usm_type is None: + _usm_type = "device" if usm_type_alloc is None else usm_type_alloc + else: + _usm_type = usm_type - start = _list_to_array(start, _usm_type, sycl_queue_normalized) - stop = _list_to_array(stop, _usm_type, sycl_queue_normalized) + if not hasattr(start, "dtype") and not dpnp.isscalar(start): + start = dpnp.asarray( + start, usm_type=_usm_type, sycl_queue=sycl_queue_normalized + ) + if not hasattr(stop, "dtype") and not dpnp.isscalar(stop): + stop = dpnp.asarray( + stop, usm_type=_usm_type, sycl_queue=sycl_queue_normalized + ) dt = numpy.result_type(start, stop, float(num)) dt = utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) if dtype is None: dtype = dt + num = operator.index(num) + if num < 0: + raise ValueError("Number of points must be non-negative") + step_num = (num - 1) if endpoint else num + + step_nan = False + if step_num == 0: + step_nan = True + step = dpnp.nan + if dpnp.isscalar(start) and dpnp.isscalar(stop): # Call linspace() function for scalars. res = dpnp_container.linspace( @@ -167,22 +161,19 @@ def dpnp_linspace( sycl_queue=sycl_queue_normalized, endpoint=endpoint, ) + if retstep is True and step_nan is False: + step = (stop - start) / step_num else: - num = operator.index(num) - if num < 0: - raise ValueError("Number of points must be non-negative") - - _start = _copy_by_usm_type( - start, dt, usm_type, _usm_type, sycl_queue_normalized + _start = dpnp.asarray( + start, + dtype=dt, + usm_type=_usm_type, + sycl_queue=sycl_queue_normalized, ) - _stop = _copy_by_usm_type( - stop, dt, usm_type, _usm_type, sycl_queue_normalized + _stop = dpnp.asarray( + stop, dtype=dt, usm_type=_usm_type, sycl_queue=sycl_queue_normalized ) - _num = (num - 1) if endpoint else num - - step = (_stop - _start) / _num - res = dpnp_container.arange( 0, stop=num, @@ -192,8 +183,10 @@ def dpnp_linspace( sycl_queue=sycl_queue_normalized, ) - res = res.reshape((-1,) + (1,) * step.ndim) - res = res * step + _start + if step_nan is False: + step = (_stop - _start) / step_num + res = res.reshape((-1,) + (1,) * step.ndim) + res = res * step + _start if endpoint and num > 1: res[-1] = dpnp_container.full(step.shape, _stop) @@ -204,7 +197,16 @@ def dpnp_linspace( if numpy.issubdtype(dtype, dpnp.integer): dpnp.floor(res, out=res) - return res.astype(dtype, copy=False) + res = res.astype(dtype, copy=False) + + if retstep is True: + if dpnp.isscalar(step): + step = dpnp.asarray( + step, usm_type=res.usm_type, sycl_queue=res.sycl_queue + ) + return [res, step] + + return res def dpnp_logspace( @@ -219,57 +221,38 @@ def dpnp_logspace( dtype=None, axis=0, ): - usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations( - [start, stop, base] - ) - - sycl_queue_normalized = _get_sycl_queue( - sycl_queue_alloc, sycl_queue, device - ) - - _usm_type = _get_temporary_usm_type(usm_type_alloc, usm_type) - - start = _list_to_array(start, _usm_type, sycl_queue_normalized) - stop = _list_to_array(stop, _usm_type, 
sycl_queue_normalized) - base = _list_to_array(base, _usm_type, sycl_queue_normalized) - - dt = numpy.result_type(start, stop, float(num)) - dt = utils.map_dtype_to_device(dt, sycl_queue_normalized.sycl_device) - - if dpnp.isscalar(start) and dpnp.isscalar(stop): - # Call linspace() function for scalars. - res = dpnp_container.linspace( - start, - stop, - num, - dtype=dt, - usm_type=_usm_type, - sycl_queue=sycl_queue_normalized, - endpoint=endpoint, + if not dpnp.isscalar(base): + usm_type_alloc, sycl_queue_alloc = utils.get_usm_allocations( + [start, stop, base] ) - else: - _start = _copy_by_usm_type( - start, dt, usm_type, _usm_type, sycl_queue_normalized - ) - _stop = _copy_by_usm_type( - stop, dt, usm_type, _usm_type, sycl_queue_normalized - ) - res = dpnp_linspace( - _start, - _stop, - num=num, - usm_type=_usm_type, - sycl_queue=sycl_queue_normalized, - endpoint=endpoint, - axis=axis, + if sycl_queue is None and device is None: + sycl_queue = sycl_queue_alloc + sycl_queue = dpnp.get_normalized_queue_device( + sycl_queue=sycl_queue, device=device ) - _base = _copy_by_usm_type( - base, dt, usm_type, _usm_type, sycl_queue_normalized + if usm_type is None: + usm_type = "device" if usm_type_alloc is None else usm_type_alloc + else: + usm_type = usm_type + start = dpnp.asarray(start, usm_type=usm_type, sycl_queue=sycl_queue) + stop = dpnp.asarray(stop, usm_type=usm_type, sycl_queue=sycl_queue) + base = dpnp.asarray(base, usm_type=usm_type, sycl_queue=sycl_queue) + [start, stop, base] = dpnp.broadcast_arrays(start, stop, base) + base = dpnp.expand_dims(base, axis=axis) + + res = dpnp_linspace( + start, + stop, + num=num, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + endpoint=endpoint, + axis=axis, ) - print(res.shape) if dtype is None: - return dpnp.power(_base, res) - return dpnp.power(_base, res).astype(dtype, copy=False) + return dpnp.power(base, res) + return dpnp.power(base, res).astype(dtype, copy=False) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 172ee5ea794f..8163395fbccf 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -1148,11 +1148,6 @@ def linspace( For full documentation refer to :obj:`numpy.linspace`. - Limitations - ----------- - Parameter `retstep` is supported only with default value ``False``. - Otherwise ``NotImplementedError`` exception will be raised. 
- See Also -------- :obj:`dpnp.arange` : Similar to `linspace`, but uses a step size (instead @@ -1173,11 +1168,6 @@ def linspace( """ - if retstep is not False: - raise NotImplementedError( - f"retstep={retstep} is currently not supported" - ) - return dpnp_linspace( start, stop, @@ -1187,6 +1177,7 @@ def linspace( usm_type=usm_type, sycl_queue=sycl_queue, endpoint=endpoint, + retstep=retstep, axis=axis, ) diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 5bc6d3ca838e..c79089098315 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -204,9 +204,6 @@ tests/third_party/cupy/creation_tests/test_ranges.py::TestMgrid::test_mgrid3 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid3 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid4 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid5 -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_one_num_no_endopoint_with_retstep -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_with_retstep -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_zero_num_no_endopoint_with_retstep tests/third_party/cupy/indexing_tests/test_generate.py::TestAxisConcatenator::test_AxisConcatenator_init1 tests/third_party/cupy/indexing_tests/test_generate.py::TestAxisConcatenator::test_len tests/third_party/cupy/indexing_tests/test_generate.py::TestC_::test_c_1 diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index 64630dca237c..c3ed9154b53e 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -322,9 +322,6 @@ tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid4 tests/third_party/cupy/creation_tests/test_ranges.py::TestOgrid::test_ogrid5 tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_arange_negative_size tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_arange_no_dtype_int -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_one_num_no_endopoint_with_retstep -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_with_retstep -tests/third_party/cupy/creation_tests/test_ranges.py::TestRanges::test_linspace_zero_num_no_endopoint_with_retstep tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_1_{axes=None, norm=None, s=(1, None), shape=(3, 4)}::test_fft2 tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_7_{axes=(), norm=None, s=None, shape=(3, 4)}::test_fft2 diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 36d82acc1a6c..51ba1c87bc18 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -608,23 +608,28 @@ def test_dpctl_tensor_input(func, args): ) @pytest.mark.parametrize( "num", - [5, numpy.array(10), dpnp.array(17), dpt.asarray(100)], - ids=["5", "numpy.array(10)", "dpnp.array(17)", "dpt.asarray(100)"], + [1, 5, numpy.array(10), dpnp.array(17), dpt.asarray(100)], + ids=["1", "5", "numpy.array(10)", "dpnp.array(17)", "dpt.asarray(100)"], ) @pytest.mark.parametrize( "dtype", get_all_dtypes(no_bool=True, no_float16=False) ) -def test_linspace(start, stop, num, dtype): - func = lambda xp: xp.linspace(start, stop, num, dtype=dtype) +@pytest.mark.parametrize("retstep", [True, False], ids=["True", "False"]) +def test_linspace(start, stop, num, dtype, retstep): + res_np = numpy.linspace(start, stop, num, dtype=dtype, retstep=retstep) + res_dp = 
dpnp.linspace(start, stop, num, dtype=dtype, retstep=retstep) + + if retstep: + [res_np, step_np] = res_np + [res_dp, step_dp] = res_dp + assert_allclose(step_np, step_dp) if numpy.issubdtype(dtype, dpnp.integer): - assert_allclose(func(numpy), func(dpnp), rtol=1) + assert_allclose(res_np, res_dp, rtol=1) else: if dtype is None and not has_support_aspect64(): dtype = dpnp.float32 - assert_allclose( - func(numpy), func(dpnp), rtol=1e-06, atol=numpy.finfo(dtype).eps - ) + assert_allclose(res_np, res_dp, rtol=1e-06, atol=dpnp.finfo(dtype).eps) @pytest.mark.parametrize( @@ -670,13 +675,13 @@ def test_linspace_arrays(start, stop): def test_linspace_complex(): func = lambda xp: xp.linspace(0, 3 + 2j, num=1000) - assert_allclose(func(numpy), func(dpnp)) + assert_allclose(func(dpnp), func(numpy)) @pytest.mark.parametrize("axis", [0, 1]) def test_linspace_axis(axis): func = lambda xp: xp.linspace([2, 3], [20, 15], num=10, axis=axis) - assert_allclose(func(numpy), func(dpnp)) + assert_allclose(func(dpnp), func(numpy)) @pytest.mark.parametrize( @@ -703,11 +708,6 @@ def test_set_shape(shape): assert_array_equal(na, da) -def test_linspace_retstep_error(): - with pytest.raises(NotImplementedError): - dpnp.linspace(2, 5, 3, retstep=True) - - def test_geomspace_zero_error(): with pytest.raises(ValueError): dpnp.geomspace(0, 5, 3) @@ -747,13 +747,13 @@ def test_geomspace(sign, dtype, num, endpoint): # dpnp.sign raise numpy fall back for complex dtype def test_geomspace_complex(): func = lambda xp: xp.geomspace(1j, 10j, num=10) - assert_allclose(func(numpy), func(dpnp)) + assert_allclose(func(dpnp), func(numpy)) @pytest.mark.parametrize("axis", [0, 1]) def test_geomspace_axis(axis): func = lambda xp: xp.geomspace([2, 3], [20, 15], num=10, axis=axis) - assert_allclose(func(numpy), func(dpnp)) + assert_allclose(func(dpnp), func(numpy)) @pytest.mark.parametrize("dtype", get_all_dtypes()) @@ -779,7 +779,11 @@ def test_logspace(dtype, num, endpoint, base): @pytest.mark.parametrize("axis", [0, 1]) def test_logspace_axis(axis): + if numpy.lib.NumpyVersion(numpy.__version__) < "1.25.0": + pytest.skip( + "numpy.logspace supports a non-scalar base argument since 1.25.0" + ) func = lambda xp: xp.logspace( [2, 3], [20, 15], num=2, base=[[1, 3], [5, 7]], axis=axis ) - assert_allclose(func(numpy), func(dpnp)) + assert_allclose(func(dpnp), func(numpy)) diff --git a/tests/third_party/cupy/creation_tests/test_ranges.py b/tests/third_party/cupy/creation_tests/test_ranges.py index 46b1fbe9180b..20e01ca8301f 100644 --- a/tests/third_party/cupy/creation_tests/test_ranges.py +++ b/tests/third_party/cupy/creation_tests/test_ranges.py @@ -1,3 +1,4 @@ +import functools import math import sys import unittest @@ -10,6 +11,27 @@ from tests.third_party.cupy import testing +def skip_int_equality_before_numpy_1_20(names=("dtype",)): + """Require numpy/numpy#16841 or skip the equality check.""" + + def decorator(wrapped): + if numpy.lib.NumpyVersion(numpy.__version__) >= "1.20.0": + return wrapped + + @functools.wraps(wrapped) + def wrapper(self, *args, **kwargs): + xp = kwargs["xp"] + dtypes = [kwargs[name] for name in names] + ret = wrapped(self, *args, **kwargs) + if any(numpy.issubdtype(dtype, numpy.integer) for dtype in dtypes): + ret = xp.zeros_like(ret) + return ret + + return wrapper + + return decorator + + @testing.gpu class TestRanges(unittest.TestCase): @testing.for_all_dtypes(no_bool=True) @@ -79,6 +101,14 @@ def test_linspace(self, xp, dtype): def test_linspace2(self, xp, dtype): return xp.linspace(10, 0, 5, dtype=dtype) + 
@testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_array_equal() + @skip_int_equality_before_numpy_1_20() + def test_linspace3(self, xp, dtype): + if xp.dtype(dtype).kind == "u": + pytest.skip() + return xp.linspace(-10, 8, 9, dtype=dtype) + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_linspace_zero_num(self, xp, dtype): @@ -261,6 +291,17 @@ def test_logspace_neg_num(self): def test_logspace_base(self, xp, dtype): return xp.logspace(0, 2, 5, base=2.0, dtype=dtype) + # See #7946 and https://github.com/numpy/numpy/issues/24957 + @testing.with_requires("numpy>=1.16, !=1.25.*, !=1.26.*") + @testing.for_all_dtypes_combination( + names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + ) + @testing.numpy_cupy_allclose(rtol=1e-6, contiguous_check=False) + def test_logspace_array_start_stop_axis1(self, xp, dtype_range, dtype_out): + start = xp.array([0, 2], dtype=dtype_range) + stop = xp.array([2, 0], dtype=dtype_range) + return xp.logspace(start, stop, num=5, dtype=dtype_out, axis=1) + @testing.parameterize( *testing.product( From ce2ccffd53c70157bf7b7d4e5172b1025b90685f Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Tue, 31 Oct 2023 14:34:08 -0500 Subject: [PATCH 05/38] Added tests --- dpnp/dpnp_algo/dpnp_arraycreation.py | 2 +- dpnp/dpnp_iface_arraycreation.py | 45 +++++++++++++++++- tests/test_arraycreation.py | 47 +++++++++++++++---- tests/test_sycl_queue.py | 24 ++++++++-- .../cupy/creation_tests/test_ranges.py | 2 +- 5 files changed, 104 insertions(+), 16 deletions(-) diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py index f7c315c42e52..52456550c788 100644 --- a/dpnp/dpnp_algo/dpnp_arraycreation.py +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -204,7 +204,7 @@ def dpnp_linspace( step = dpnp.asarray( step, usm_type=res.usm_type, sycl_queue=res.sycl_queue ) - return [res, step] + return (res, step) return res diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 8163395fbccf..c43d75be81fd 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -1043,6 +1043,11 @@ def geomspace( For full documentation refer to :obj:`numpy.geomspace`. + Returns + ------- + samples : dpnp.ndarray + num samples, equally spaced on a log scale. + See Also -------- :obj:`dpnp.logspace` : Similar to geomspace, but with endpoints specified @@ -1057,9 +1062,22 @@ def geomspace( >>> import dpnp as np >>> np.geomspace(1, 1000, num=4) array([ 1., 10., 100., 1000.]) - - >>> x2 = np.geomspace(1, 1000, num=4, endpoint=False) + >>> np.geomspace(1, 1000, num=3, endpoint=False) + array([ 1., 10., 100.]) + >>> np.geomspace(1, 1000, num=4, endpoint=False) array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) + >>> np.geomspace(1, 256, num=9) + array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) + + >>> np.geomspace(1, 256, num=9, dtype=int) + array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) + >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) + + >>> np.geomspace(1000, 1, num=4) + array([1000., 100., 10., 1.]) + >>> np.geomspace(-1000, -1, num=4) + array([-1000., -100., -10., -1.]) """ @@ -1148,6 +1166,17 @@ def linspace( For full documentation refer to :obj:`numpy.linspace`. + Returns + ------- + samples : dpnp.ndarray + There are num equally spaced samples in the closed interval + [`start`, `stop`] or the half-open interval [`start`, `stop`) + (depending on whether `endpoint` is ``True`` or ``False``). 
+ step : float, optional + Only returned if `retstep` is ``True`` + + Size of spacing between samples. + See Also -------- :obj:`dpnp.arange` : Similar to `linspace`, but uses a step size (instead @@ -1166,6 +1195,9 @@ def linspace( >>> np.linspace(2.0, 3.0, num=5, endpoint=False) array([2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([2. , 2.25, 2.5 , 2.75, 3. ]), array(0.25)) + """ return dpnp_linspace( @@ -1227,6 +1259,11 @@ def logspace( For full documentation refer to :obj:`numpy.logspace`. + Returns + ------- + samples : dpnp.ndarray + num samples, equally spaced on a log scale. + See Also -------- :obj:`dpnp.arange` : Similar to linspace, with the step size specified @@ -1250,6 +1287,10 @@ def logspace( >>> np.logspace(2.0, 3.0, num=4, base=2.0) array([4. , 5.0396842 , 6.34960421, 8. ]) + >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1) + array([[ 4. , 5.0396842 , 6.34960421, 8. ], + [ 9. , 12.98024613, 18.72075441, 27. ]]) + """ return dpnp_logspace( diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 51ba1c87bc18..fa02c127e67e 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -684,6 +684,21 @@ def test_linspace_axis(axis): assert_allclose(func(dpnp), func(numpy)) +def test_linspace_step_nan(): + func = lambda xp: xp.linspace(1, 2, num=0, endpoint=False) + assert_allclose(func(dpnp), func(numpy)) + + +@pytest.mark.parametrize("start", [1, [1, 1]]) +@pytest.mark.parametrize("stop", [10, [10 + 10]]) +def test_linspace_retstep(start, stop): + func = lambda xp: xp.linspace(start, stop, num=10, retstep=True) + np_res = func(numpy) + dpnp_res = func(dpnp) + assert_allclose(dpnp_res[0], np_res[0]) + assert_allclose(dpnp_res[1], np_res[1]) + + @pytest.mark.parametrize( "arrays", [[], [[1]], [[1, 2, 3], [4, 5, 6]], [[1, 2], [3, 4], [5, 6]]], @@ -740,29 +755,45 @@ def test_geomspace(sign, dtype, num, endpoint): np_res = func(numpy) dpnp_res = func(dpnp) - assert_allclose(dpnp_res, np_res) + if dtype in [numpy.int64, numpy.int32]: + assert_allclose(dpnp_res, np_res, rtol=1) + else: + assert_allclose(dpnp_res, np_res, rtol=1e-04) @pytest.mark.usefixtures("allow_fall_back_on_numpy") +@pytest.mark.parametrize("start", [1j, 1 + 1j]) +@pytest.mark.parametrize("stop", [10j, 10 + 10j]) # dpnp.sign raise numpy fall back for complex dtype -def test_geomspace_complex(): - func = lambda xp: xp.geomspace(1j, 10j, num=10) - assert_allclose(func(dpnp), func(numpy)) +def test_geomspace_complex(start, stop): + func = lambda xp: xp.geomspace(start, stop, num=10) + np_res = func(numpy) + dpnp_res = func(dpnp) + assert_allclose(dpnp_res, np_res, rtol=1e-04) @pytest.mark.parametrize("axis", [0, 1]) def test_geomspace_axis(axis): func = lambda xp: xp.geomspace([2, 3], [20, 15], num=10, axis=axis) - assert_allclose(func(dpnp), func(numpy)) + np_res = func(numpy) + dpnp_res = func(dpnp) + assert_allclose(dpnp_res, np_res, rtol=1e-04) + + +def test_geomspace_num0(): + func = lambda xp: xp.geomspace(1, 10, num=0, endpoint=False) + np_res = func(numpy) + dpnp_res = func(dpnp) + assert_allclose(dpnp_res, np_res, rtol=1e-04) @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize("num", [2, 4, 8, 3, 9, 27]) -@pytest.mark.parametrize("base", [-2, 2]) @pytest.mark.parametrize("endpoint", [True, False]) -def test_logspace(dtype, num, endpoint, base): +def test_logspace(dtype, num, endpoint): start = 2 stop = 5 + base = 2 func = lambda xp: xp.logspace( start, stop, num, endpoint=endpoint, dtype=dtype, 
base=base @@ -774,7 +805,7 @@ def test_logspace(dtype, num, endpoint, base): if dtype in [numpy.int64, numpy.int32]: assert_allclose(dpnp_res, np_res, rtol=1) else: - assert_allclose(dpnp_res, np_res) + assert_allclose(dpnp_res, np_res, rtol=1e-04) @pytest.mark.parametrize("axis", [0, 1]) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 4d7e29c1400f..33da6e5af4c8 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -143,11 +143,11 @@ def test_empty_like(device_x, device_y): [ pytest.param("full_like", ["x0"], {"fill_value": 5}), pytest.param("geomspace", ["x0[0:3]", "8", "4"], {}), - pytest.param("geomspace", ["1", "x0[3:5]", "4"], {}), + pytest.param("geomspace", ["1", "x0[2:4]", "4"], {}), pytest.param("linspace", ["x0[0:2]", "8", "4"], {}), - pytest.param("linspace", ["0", "x0[3:5]", "4"], {}), + pytest.param("linspace", ["0", "x0[2:4]", "4"], {}), pytest.param("logspace", ["x0[0:2]", "8", "4"], {}), - pytest.param("logspace", ["0", "x0[3:5]", "4"], {}), + pytest.param("logspace", ["0", "x0[2:4]", "4"], {}), pytest.param("ones_like", ["x0"], {}), pytest.param("tril", ["x0.reshape((2,2))"], {}), pytest.param("triu", ["x0.reshape((2,2))"], {}), @@ -168,7 +168,23 @@ def test_array_creation_follow_device(func, args, kwargs, device): dpnp_args = [eval(val, {"x0": x}) for val in args] y = getattr(dpnp, func)(*dpnp_args, **kwargs) - assert_allclose(y_orig, y) + assert_allclose(y_orig, y, rtol=1e-04) + assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) + + +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_array_creation_follow_device_logspace_base(device): + x_orig = numpy.array([1, 2, 3, 4]) + y_orig = numpy.logspace(0, 8, 4, base=x_orig[1:3]) + + x = dpnp.array([1, 2, 3, 4], device=device) + y = dpnp.logspace(0, 8, 4, base=x[1:3]) + + assert_allclose(y_orig, y, rtol=1e-04) assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) diff --git a/tests/third_party/cupy/creation_tests/test_ranges.py b/tests/third_party/cupy/creation_tests/test_ranges.py index 20e01ca8301f..623adc409b7e 100644 --- a/tests/third_party/cupy/creation_tests/test_ranges.py +++ b/tests/third_party/cupy/creation_tests/test_ranges.py @@ -222,7 +222,7 @@ def test_linspace_mixed_start_stop2(self, xp, dtype_range, dtype_out): @testing.for_all_dtypes_combination( names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True ) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_allclose(rtol=1e-04) def test_linspace_array_start_stop_axis1(self, xp, dtype_range, dtype_out): start = xp.array([0, 120], dtype=dtype_range) stop = xp.array([100, 0], dtype=dtype_range) From 61241ccd850ab5c13b103616eb4a9e0a404b5591 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Tue, 31 Oct 2023 15:35:34 -0500 Subject: [PATCH 06/38] skip test logspace base --- tests/test_sycl_queue.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 33da6e5af4c8..025a3673bcb3 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -172,6 +172,10 @@ def test_array_creation_follow_device(func, args, kwargs, device): assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) +@pytest.mark.skipif( + numpy.lib.NumpyVersion(numpy.__version__) < "1.25.0", + reason="numpy.logspace supports a non-scalar base argument since 1.25.0", +) @pytest.mark.parametrize( "device", valid_devices, From 440fe6a3fd0f54a53679e2e2ae3fd9b30110d216 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: 
Wed, 1 Nov 2023 22:35:36 -0500 Subject: [PATCH 07/38] Leverage dpctl.tensor.repeat() implementation --- dpnp/backend/include/dpnp_iface_fptr.hpp | 4 - .../kernels/dpnp_krnl_manipulation.cpp | 18 --- dpnp/dpnp_algo/CMakeLists.txt | 1 - dpnp/dpnp_algo/dpnp_algo.pxd | 12 -- dpnp/dpnp_algo/dpnp_algo.pyx | 49 -------- dpnp/dpnp_algo/dpnp_algo_manipulation.pxi | 80 ------------- dpnp/dpnp_array.py | 73 +++++++++++- dpnp/dpnp_iface_manipulation.py | 98 ++++++++++------ dpnp/linalg/dpnp_algo_linalg.pyx | 6 +- tests/skipped_tests.tbl | 9 -- tests/skipped_tests_gpu.tbl | 10 -- tests/test_arraymanipulation.py | 111 ++++++++++++++++++ tests/test_dparray.py | 13 ++ .../cupy/manipulation_tests/test_shape.py | 11 +- 14 files changed, 267 insertions(+), 228 deletions(-) delete mode 100644 dpnp/dpnp_algo/dpnp_algo_manipulation.pxi diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index 3df04000413c..311e65972508 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -178,8 +178,6 @@ enum class DPNPFuncName : size_t DPNP_FN_FILL_DIAGONAL_EXT, /**< Used in numpy.fill_diagonal() impl, requires extra parameters */ DPNP_FN_FLATTEN, /**< Used in numpy.flatten() impl */ - DPNP_FN_FLATTEN_EXT, /**< Used in numpy.flatten() impl, requires extra - parameters */ DPNP_FN_FLOOR, /**< Used in numpy.floor() impl */ DPNP_FN_FLOOR_DIVIDE, /**< Used in numpy.floor_divide() impl */ DPNP_FN_FLOOR_DIVIDE_EXT, /**< Used in numpy.floor_divide() impl, requires @@ -265,8 +263,6 @@ enum class DPNPFuncName : size_t DPNP_FN_RECIP_EXT, /**< Used in numpy.recip() impl, requires extra parameters */ DPNP_FN_REPEAT, /**< Used in numpy.repeat() impl */ - DPNP_FN_REPEAT_EXT, /**< Used in numpy.repeat() impl, requires extra - parameters */ DPNP_FN_RIGHT_SHIFT, /**< Used in numpy.right_shift() impl */ DPNP_FN_RNG_BETA, /**< Used in numpy.random.beta() impl */ DPNP_FN_RNG_BETA_EXT, /**< Used in numpy.random.beta() impl, requires extra diff --git a/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp b/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp index 12e275e90b90..315e1c211f96 100644 --- a/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp @@ -103,15 +103,6 @@ void (*dpnp_repeat_default_c)(const void *, const size_t, const size_t) = dpnp_repeat_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_repeat_ext_c)(DPCTLSyclQueueRef, - const void *, - void *, - const size_t, - const size_t, - const DPCTLEventVectorRef) = - dpnp_repeat_c<_DataType>; - template class dpnp_elemwise_transpose_c_kernel; @@ -232,15 +223,6 @@ void func_map_init_manipulation(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_REPEAT][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_repeat_default_c}; - fmap[DPNPFuncName::DPNP_FN_REPEAT_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_repeat_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REPEAT_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_repeat_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REPEAT_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_repeat_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REPEAT_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_repeat_ext_c}; - fmap[DPNPFuncName::DPNP_FN_TRANSPOSE][eft_INT][eft_INT] = { eft_INT, (void *)dpnp_elemwise_transpose_default_c}; fmap[DPNPFuncName::DPNP_FN_TRANSPOSE][eft_LNG][eft_LNG] = { diff --git a/dpnp/dpnp_algo/CMakeLists.txt b/dpnp/dpnp_algo/CMakeLists.txt index 8aa419220def..ad91b4babf97 100644 --- a/dpnp/dpnp_algo/CMakeLists.txt +++ 
b/dpnp/dpnp_algo/CMakeLists.txt @@ -1,7 +1,6 @@ set(dpnp_algo_pyx_deps ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_linearalgebra.pxi - ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_manipulation.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_counting.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_statistics.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_trigonometric.pxi diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index da6aef0ccd9c..065d3502dafd 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -94,8 +94,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_FFT_RFFT_EXT DPNP_FN_FILL_DIAGONAL DPNP_FN_FILL_DIAGONAL_EXT - DPNP_FN_FLATTEN - DPNP_FN_FLATTEN_EXT DPNP_FN_FMOD DPNP_FN_FMOD_EXT DPNP_FN_FULL @@ -138,8 +136,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_RADIANS_EXT DPNP_FN_RECIP DPNP_FN_RECIP_EXT - DPNP_FN_REPEAT - DPNP_FN_REPEAT_EXT DPNP_FN_RNG_BETA DPNP_FN_RNG_BETA_EXT DPNP_FN_RNG_BINOMIAL @@ -331,8 +327,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*dpnp_reduction_c_t)(c_dpctl.DPCTLSyclQueueRe const long*, const c_dpctl.DPCTLEventVectorRef) -cpdef dpnp_descriptor dpnp_flatten(dpnp_descriptor x1) - """ Internal functions @@ -367,12 +361,6 @@ cpdef dpnp_descriptor dpnp_fmax(dpnp_descriptor x1_obj, dpnp_descriptor x2_obj, dpnp_descriptor out=*, object where=*) cpdef dpnp_descriptor dpnp_fmin(dpnp_descriptor x1_obj, dpnp_descriptor x2_obj, object dtype=*, dpnp_descriptor out=*, object where=*) -""" -Array manipulation routines -""" -cpdef dpnp_descriptor dpnp_repeat(dpnp_descriptor array1, repeats, axes=*) - - """ Statistics functions """ diff --git a/dpnp/dpnp_algo/dpnp_algo.pyx b/dpnp/dpnp_algo/dpnp_algo.pyx index 2d3be5f88a0a..5138e72ee831 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pyx +++ b/dpnp/dpnp_algo/dpnp_algo.pyx @@ -54,7 +54,6 @@ import operator import numpy __all__ = [ - "dpnp_flatten", "dpnp_queue_initialize", ] @@ -64,7 +63,6 @@ include "dpnp_algo_counting.pxi" include "dpnp_algo_indexing.pxi" include "dpnp_algo_linearalgebra.pxi" include "dpnp_algo_logic.pxi" -include "dpnp_algo_manipulation.pxi" include "dpnp_algo_mathematical.pxi" include "dpnp_algo_searching.pxi" include "dpnp_algo_sorting.pxi" @@ -82,53 +80,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_flatten_t)(c_dpctl.DPCTLSyclQueueR const c_dpctl.DPCTLEventVectorRef) -cpdef utils.dpnp_descriptor dpnp_flatten(utils.dpnp_descriptor x1): - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_FLATTEN_EXT, param1_type, param1_type) - - cdef shape_type_c x1_shape = x1.shape - cdef shape_type_c x1_strides = utils.strides_to_vector(x1.strides, x1_shape) - - x1_obj = x1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (x1.size,) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef shape_type_c result_strides = utils.strides_to_vector(result.strides, result_shape) - - cdef fptr_dpnp_flatten_t func = kernel_data.ptr - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - result.get_data(), - result.size, - result.ndim, - result_shape.data(), - 
result_strides.data(), - x1.get_data(), - x1.size, - x1.ndim, - x1_shape.data(), - x1_strides.data(), - NULL, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - cpdef dpnp_queue_initialize(): """ Initialize SYCL queue which will be used for any library operations. diff --git a/dpnp/dpnp_algo/dpnp_algo_manipulation.pxi b/dpnp/dpnp_algo/dpnp_algo_manipulation.pxi deleted file mode 100644 index 7a517ac261fc..000000000000 --- a/dpnp/dpnp_algo/dpnp_algo_manipulation.pxi +++ /dev/null @@ -1,80 +0,0 @@ -# cython: language_level=3 -# cython: linetrace=True -# -*- coding: utf-8 -*- -# ***************************************************************************** -# Copyright (c) 2016-2023, Intel Corporation -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# - Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# - Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -# THE POSSIBILITY OF SUCH DAMAGE. -# ***************************************************************************** - -"""Module Backend (Array manipulation routines) - -This module contains interface functions between C backend layer -and the rest of the library - -""" - -# NO IMPORTs here. 
All imports must be placed into main "dpnp_algo.pyx" file - -__all__ += [ - "dpnp_repeat", -] - - -# C function pointer to the C library template functions -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_repeat_t)(c_dpctl.DPCTLSyclQueueRef, - const void *, void * , const size_t , const size_t, - const c_dpctl.DPCTLEventVectorRef) - - -cpdef utils.dpnp_descriptor dpnp_repeat(utils.dpnp_descriptor array1, repeats, axes=None): - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(array1.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_REPEAT_EXT, param1_type, param1_type) - - array1_obj = array1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (array1.size * repeats, ) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=array1_obj.sycl_device, - usm_type=array1_obj.usm_type, - sycl_queue=array1_obj.sycl_queue) - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef fptr_dpnp_repeat_t func = kernel_data.ptr - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - array1.get_data(), - result.get_data(), - repeats, - array1.size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index ce80de31be57..dd4739c81ef4 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1045,7 +1045,47 @@ def put(self, indices, vals, /, *, axis=None, mode="wrap"): return dpnp.put(self, indices, vals, axis=axis, mode=mode) - # 'ravel', + def ravel(self, order="C"): + """ + Return a contiguous flattened array. + + For full documentation refer to :obj:`numpy.ndarray.ravel`. + + Parameters + ---------- + order : {'C', 'F'}, optional + The elements of a are read using this index order. ``C`` means to index + the elements in row-major, C-style order, with the last axis index + changing fastest, back to the first axis index changing slowest. ``F`` + means to index the elements in column-major, Fortran-style order, with + the first index changing fastest, and the last index changing slowest. + By default, ``C`` index order is used. + + Returns + ------- + y : dpnp_array + `y` is a contiguous 1-D array of the same subtype as a, with shape (a.size,) + + See Also + -------- + :obj:`dpnp.reshape` : Change the shape of an array without changing its data. + + Examples + -------- + >>> import dpnp as np + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> x.ravel() + array([1, 2, 3, 4, 5, 6]) + + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) + + >>> x.ravel(order='F') + array([1, 4, 2, 5, 3, 6]) + + """ + + return dpnp.ravel(self, order=order) @property def real(self): @@ -1082,7 +1122,36 @@ def real(self, value): """ dpnp.copyto(self._array_obj.real, value) - # 'repeat', + def repeat(self, repeats, axis=None): + """ + Repeat elements of an array. + + For full documentation refer to :obj:`numpy.ndarray.repeat`. + + Parameters + ---------- + repeat : Union[int, Tuple[int, ...]] + The number of repetitions for each element. + `repeats` is broadcasted to fit the shape of the given axis. + axis : Optional[int] + The axis along which to repeat values. The `axis` is required + if input array has more than one dimension. + + Returns + ------- + out : dpnp_array + Array with repeated elements. 
+ + Examples + -------- + >>> import dpnp as np + >>> x = np.array([3]) + >>> x.repeat(4) + array([3, 3, 3, 3]) + + """ + + return dpnp.repeat(self, repeats, axis=axis) def reshape(self, *sh, **kwargs): """ diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 3c2e18a5d4ca..3759ceb72740 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -969,27 +969,44 @@ def ravel(a, order="C"): For full documentation refer to :obj:`numpy.ravel`. - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters + ---------- + x : {dpnp_array, usm_ndarray} + Input array. The elements in a are read in the order specified by order, + and packed as a 1-D array. + order : {'C', 'F'}, optional + The elements of a are read using this index order. ``C`` means to index + the elements in row-major, C-style order, with the last axis index + changing fastest, back to the first axis index changing slowest. ``F`` + means to index the elements in column-major, Fortran-style order, with + the first index changing fastest, and the last index changing slowest. + By default, ``C`` index order is used. + + Returns + ------- + y : dpnp_array + `y` is a contiguous 1-D array of the same subtype as a, with shape (a.size,) + + See Also + -------- + :obj:`dpnp.reshape` : Change the shape of an array without changing its data. Examples -------- >>> import dpnp as np >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> out = np.ravel(x) - >>> [i for i in out] - [1, 2, 3, 4, 5, 6] + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) - """ + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) - a_desc = dpnp.get_dpnp_descriptor(a, copy_when_nondefault_queue=False) - if a_desc: - return dpnp_flatten(a_desc).get_pyobj() + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) - return call_origin(numpy.ravel, a, order=order) + """ + + return dpnp.reshape(a, -1, order=order) def repeat(a, repeats, axis=None): @@ -998,39 +1015,44 @@ def repeat(a, repeats, axis=None): For full documentation refer to :obj:`numpy.repeat`. - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameter `axis` is supported with value either ``None`` or ``0``. - Dimension of input array are supported to be less than ``2``. - Otherwise the function will be executed sequentially on CPU. - If `repeats` is ``tuple`` or ``list``, should be ``len(repeats) > 1``. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters + ---------- + x : {dpnp_array, usm_ndarray} + Input array. + repeat : Union[int, Tuple[int, ...]] + The number of repetitions for each element. + `repeats` is broadcasted to fit the shape of the given axis. + axis : Optional[int] + The axis along which to repeat values. The `axis` is required + if input array has more than one dimension. - .. seealso:: :obj:`numpy.tile` tile an array. + Returns + ------- + out : dpnp_array + Array with repeated elements. + + See Also + -------- + :obj:`dpnp.tile` : Construct an array by repeating A the number of times given by reps. 
Examples -------- >>> import dpnp as np - >>> x = np.repeat(3, 4) - >>> [i for i in x] - [3, 3, 3, 3] + >>> x = np.array([3]) + >>> np.repeat(x, 4) + array([3, 3, 3, 3]) """ - a_desc = dpnp.get_dpnp_descriptor(a, copy_when_nondefault_queue=False) - if a_desc: - if axis is not None and axis != 0: - pass - elif a_desc.ndim >= 2: - pass - elif not dpnp.isscalar(repeats) and len(repeats) > 1: - pass - else: - repeat_val = repeats if dpnp.isscalar(repeats) else repeats[0] - return dpnp_repeat(a_desc, repeat_val, axis).get_pyobj() - - return call_origin(numpy.repeat, a, repeats, axis) + rep = repeats + if isinstance(repeats, dpnp_array): + rep = dpnp.get_usm_ndarray(repeats) + if axis is None and a.ndim > 1: + usm_arr = dpnp.get_usm_ndarray(a.flatten()) + else: + usm_arr = dpnp.get_usm_ndarray(a) + usm_arr = dpt.repeat(usm_arr, rep, axis=axis) + return dpnp_array._create_from_usm_ndarray(usm_arr) def reshape(a, /, newshape, order="C", copy=None): diff --git a/dpnp/linalg/dpnp_algo_linalg.pyx b/dpnp/linalg/dpnp_algo_linalg.pyx index 31eff554dc2b..c86b869acd3c 100644 --- a/dpnp/linalg/dpnp_algo_linalg.pyx +++ b/dpnp/linalg/dpnp_algo_linalg.pyx @@ -118,7 +118,8 @@ cpdef utils.dpnp_descriptor dpnp_cholesky(utils.dpnp_descriptor input_): cpdef object dpnp_cond(object input, object p): if p in ('f', 'fro'): - input = dpnp.ravel(input, order='K') + # TODO: change order='K' when support is implemented + input = dpnp.ravel(input, order='C') sqnorm = dpnp.dot(input, input) res = dpnp.sqrt(sqnorm) ret = dpnp.array([res]) @@ -368,7 +369,8 @@ cpdef object dpnp_norm(object input, ord=None, axis=None): (ord in ('f', 'fro') and ndim == 2) or (ord == 2 and ndim == 1)): - input = dpnp.ravel(input, order='K') + # TODO: change order='K' when support is implemented + input = dpnp.ravel(input, order='C') sqnorm = dpnp.dot(input, input) ret = dpnp.sqrt([sqnorm], dtype=res_type) return dpnp.array(ret.reshape(1, *ret.shape), dtype=res_type) diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index c1b4fcb37c9d..55c67b94d83f 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -424,15 +424,6 @@ tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_par tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_2_{shapes=[(3, 2), (3, 4)]}::test_invalid_broadcast tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_3_{shapes=[(0,), (2,)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_ravel2 -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_ravel3 -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_external_ravel -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_ravel -tests/third_party/cupy/manipulation_tests/test_shape.py::TestReshape::test_reshape_zerosize -tests/third_party/cupy/manipulation_tests/test_shape.py::TestReshape::test_reshape_zerosize2 - -tests/third_party/cupy/manipulation_tests/test_tiling.py::TestRepeatRepeatsNdarray::test_func -tests/third_party/cupy/manipulation_tests/test_tiling.py::TestRepeatRepeatsNdarray::test_method tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticBinary2_param_457_{arg1=array([[1, 2, 3], [4, 5, 6]], dtype=int32), arg2=array([[0, 1, 2], [3, 4, 5]], dtype=int32), dtype=float64, name='fmod', use_dtype=False}::test_binary tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticBinary2_param_465_{arg1=array([[1, 2, 3], [4, 5, 6]], 
dtype=int32), arg2=array([[0, 1, 2], [3, 4, 5]]), dtype=float64, name='fmod', use_dtype=False}::test_binary tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticBinary2_param_537_{arg1=array([[1, 2, 3], [4, 5, 6]]), arg2=array([[0, 1, 2], [3, 4, 5]], dtype=int32), dtype=float64, name='fmod', use_dtype=False}::test_binary diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index e1be1a64647c..538ad05743a4 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -565,16 +565,6 @@ tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_par tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_2_{shapes=[(3, 2), (3, 4)]}::test_invalid_broadcast tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_3_{shapes=[(0,), (2,)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_ravel2 -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_ravel3 -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_external_ravel -tests/third_party/cupy/manipulation_tests/test_shape.py::TestRavel::test_ravel -tests/third_party/cupy/manipulation_tests/test_shape.py::TestReshape::test_reshape_zerosize -tests/third_party/cupy/manipulation_tests/test_shape.py::TestReshape::test_reshape_zerosize2 - -tests/third_party/cupy/manipulation_tests/test_tiling.py::TestRepeatRepeatsNdarray::test_func -tests/third_party/cupy/manipulation_tests/test_tiling.py::TestRepeatRepeatsNdarray::test_method - tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticRaisesWithNumpyInput_param_3_{name='angle', nargs=1}::test_raises_with_numpy_input tests/third_party/cupy/math_tests/test_explog.py::TestExplog::test_logaddexp2 diff --git a/tests/test_arraymanipulation.py b/tests/test_arraymanipulation.py index bf61634e52c8..74409f54c266 100644 --- a/tests/test_arraymanipulation.py +++ b/tests/test_arraymanipulation.py @@ -939,3 +939,114 @@ def test_can_cast(): assert dpnp.can_cast(X, "float32") == numpy.can_cast(X_np, "float32") assert dpnp.can_cast(X, dpnp.int32) == numpy.can_cast(X_np, numpy.int32) assert dpnp.can_cast(X, dpnp.int64) == numpy.can_cast(X_np, numpy.int64) + + +def test_repeat_scalar_sequence_agreement(): + x = dpnp.arange(5, dtype="i4") + expected_res = dpnp.empty(10, dtype="i4") + expected_res[1::2], expected_res[::2] = x, x + + # scalar case + reps = 2 + res = dpnp.repeat(x, reps) + assert dpnp.all(res == expected_res) + + # tuple + reps = (2, 2, 2, 2, 2) + res = dpnp.repeat(x, reps) + assert dpnp.all(res == expected_res) + + +def test_repeat_as_broadcasting(): + reps = 5 + x = dpnp.arange(reps, dtype="i4") + x1 = x[:, dpnp.newaxis] + expected_res = dpnp.broadcast_to(x1, (reps, reps)) + + res = dpnp.repeat(x1, reps, axis=1) + assert dpnp.all(res == expected_res) + + x2 = x[dpnp.newaxis, :] + expected_res = dpnp.broadcast_to(x2, (reps, reps)) + + res = dpnp.repeat(x2, reps, axis=0) + assert dpnp.all(res == expected_res) + + +def test_repeat_axes(): + reps = 2 + x = dpnp.reshape(dpnp.arange(5 * 10, dtype="i4"), (5, 10)) + expected_res = dpnp.empty((x.shape[0] * 2, x.shape[1]), dtype=x.dtype) + expected_res[::2, :], expected_res[1::2] = x, x + res = dpnp.repeat(x, reps, axis=0) + assert dpnp.all(res == expected_res) + + expected_res = dpnp.empty((x.shape[0], x.shape[1] * 2), dtype=x.dtype) + expected_res[:, ::2], expected_res[:, 1::2] = x, x + res = dpnp.repeat(x, reps, axis=1) + assert dpnp.all(res == 
expected_res) + + +def test_repeat_size_0_outputs(): + x = dpnp.ones((3, 0, 5), dtype="i4") + reps = 10 + res = dpnp.repeat(x, reps, axis=0) + assert res.size == 0 + assert res.shape == (30, 0, 5) + + res = dpnp.repeat(x, reps, axis=1) + assert res.size == 0 + assert res.shape == (3, 0, 5) + + res = dpnp.repeat(x, (2, 2, 2), axis=0) + assert res.size == 0 + assert res.shape == (6, 0, 5) + + x = dpnp.ones((3, 2, 5)) + res = dpnp.repeat(x, 0, axis=1) + assert res.size == 0 + assert res.shape == (3, 0, 5) + + x = dpnp.ones((3, 2, 5)) + res = dpnp.repeat(x, (0, 0), axis=1) + assert res.size == 0 + assert res.shape == (3, 0, 5) + + +def test_repeat_strides(): + reps = 2 + x = dpnp.reshape(dpnp.arange(10 * 10, dtype="i4"), (10, 10)) + x1 = x[:, ::-2] + expected_res = dpnp.empty((10, 10), dtype="i4") + expected_res[:, ::2], expected_res[:, 1::2] = x1, x1 + res = dpnp.repeat(x1, reps, axis=1) + assert dpnp.all(res == expected_res) + res = dpnp.repeat(x1, (reps,) * x1.shape[1], axis=1) + assert dpnp.all(res == expected_res) + + x1 = x[::-2, :] + expected_res = dpnp.empty((10, 10), dtype="i4") + expected_res[::2, :], expected_res[1::2, :] = x1, x1 + res = dpnp.repeat(x1, reps, axis=0) + assert dpnp.all(res == expected_res) + res = dpnp.repeat(x1, (reps,) * x1.shape[0], axis=0) + assert dpnp.all(res == expected_res) + + +def test_repeat_casting(): + x = dpnp.arange(5, dtype="i4") + # i4 is cast to i8 + reps = dpnp.ones(5, dtype="i4") + res = dpnp.repeat(x, reps) + assert res.shape == x.shape + assert dpnp.all(res == x) + + +def test_repeat_strided_repeats(): + x = dpnp.arange(5, dtype="i4") + reps = dpnp.ones(10, dtype="i8") + reps[::2] = 0 + reps = reps[::-2] + res = dpnp.repeat(x, reps) + assert res.shape == x.shape + assert dpnp.all(res == x) diff --git a/tests/test_dparray.py b/tests/test_dparray.py index e3f4e80a4cba..3c57d44bf912 100644 --- a/tests/test_dparray.py +++ b/tests/test_dparray.py @@ -233,3 +233,16 @@ def test_array_as_index(shape, index_dtype): ind_arr = dpnp.ones(shape, dtype=index_dtype) a = numpy.arange(ind_arr.size + 1) assert a[tuple(ind_arr)] == a[1] + + +def test_ravel(): + a = dpnp.ones((2, 2)) + b = a.ravel() + a[0, 0] = 5 + assert_array_equal(a.ravel(), b) + + +def test_repeat(): + numpy_array = numpy.arange(4).repeat(3) + dpnp_array = dpnp.arange(4).repeat(3) + assert_array_equal(numpy_array, dpnp_array) diff --git a/tests/third_party/cupy/manipulation_tests/test_shape.py b/tests/third_party/cupy/manipulation_tests/test_shape.py index 37e29ae3a361..9b672b975539 100644 --- a/tests/third_party/cupy/manipulation_tests/test_shape.py +++ b/tests/third_party/cupy/manipulation_tests/test_shape.py @@ -108,6 +108,7 @@ def test_reshape_zerosize_invalid_unknown(self): with pytest.raises(ValueError): a.reshape((-1, 0)) + @pytest.mark.skip("array.base is not implemented") @testing.numpy_cupy_array_equal() def test_reshape_zerosize(self, xp): a = xp.zeros((0,)) @@ -115,6 +116,7 @@ def test_reshape_zerosize(self, xp): assert b.base is a return b + @pytest.mark.skip("array.base is not implemented") @testing.for_orders(_supported_orders) @testing.numpy_cupy_array_equal(strides_check=True) def test_reshape_zerosize2(self, xp, order): @@ -157,20 +159,23 @@ def test_ndim_limit2(self, dtype, order): @testing.gpu class TestRavel(unittest.TestCase): - @testing.for_orders("CFA") + @testing.for_orders("CF") + # order = 'A' is out of support currently @testing.numpy_cupy_array_equal() def test_ravel(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) a = a.transpose(2, 0, 1) return 
a.ravel(order) - @testing.for_orders("CFA") + @testing.for_orders("CF") + # order = 'A' is out of support currently @testing.numpy_cupy_array_equal() def test_ravel2(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) return a.ravel(order) - @testing.for_orders("CFA") + @testing.for_orders("CF") + # order = 'A' is out of support currently @testing.numpy_cupy_array_equal() def test_ravel3(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) From 3c2d3300c3f714b34dacfeb739c8c2c7d6f933f8 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Fri, 3 Nov 2023 10:46:25 -0500 Subject: [PATCH 08/38] clean up flatten function --- dpnp/backend/kernels/dpnp_krnl_elemwise.cpp | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp b/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp index 5b0b9b047533..ec11be0db1d1 100644 --- a/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp @@ -940,19 +940,6 @@ static void func_map_init_elemwise_1arg_1type(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_FLATTEN][eft_C128][eft_C128] = { eft_C128, (void *)dpnp_copy_c_default>}; - fmap[DPNPFuncName::DPNP_FN_FLATTEN_EXT][eft_BLN][eft_BLN] = { - eft_BLN, (void *)dpnp_copy_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FLATTEN_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_copy_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FLATTEN_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_copy_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FLATTEN_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_copy_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FLATTEN_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_copy_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FLATTEN_EXT][eft_C128][eft_C128] = { - eft_C128, (void *)dpnp_copy_c_ext>}; - fmap[DPNPFuncName::DPNP_FN_NEGATIVE][eft_INT][eft_INT] = { eft_INT, (void *)dpnp_negative_c_default}; fmap[DPNPFuncName::DPNP_FN_NEGATIVE][eft_LNG][eft_LNG] = { From 296102b6f77e1e853378414e494f2bd29c27903f Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Mon, 6 Nov 2023 11:39:04 -0600 Subject: [PATCH 09/38] address comments --- dpnp/dpnp_algo/dpnp_arraycreation.py | 4 ++-- dpnp/dpnp_iface_arraycreation.py | 4 ++-- tests/test_arraycreation.py | 9 +++++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py index 52456550c788..b6d3c6120683 100644 --- a/dpnp/dpnp_algo/dpnp_arraycreation.py +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -37,11 +37,11 @@ def dpnp_geomspace( else: _usm_type = usm_type - if not hasattr(start, "dtype"): + if not dpnp.is_supported_array_type(start): start = dpnp.asarray( start, usm_type=_usm_type, sycl_queue=sycl_queue_normalized ) - if not hasattr(stop, "dtype"): + if not dpnp.is_supported_array_type(stop): stop = dpnp.asarray( stop, usm_type=_usm_type, sycl_queue=sycl_queue_normalized ) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index c43d75be81fd..06e365238e19 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -1045,7 +1045,7 @@ def geomspace( Returns ------- - samples : dpnp.ndarray + out : dpnp.ndarray num samples, equally spaced on a log scale. See Also @@ -1168,7 +1168,7 @@ def linspace( Returns ------- - samples : dpnp.ndarray + out : dpnp.ndarray There are num equally spaced samples in the closed interval [`start`, `stop`] or the half-open interval [`start`, `stop`) (depending on whether `endpoint` is ``True`` or ``False``). 
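A minimal usage sketch, assuming the interface this patch documents (array-like `start`/`stop` for `geomspace` and a sequence `base` for `logspace`, as exercised by the docstring examples above and the tests below); shapes are inferred from those examples rather than guaranteed here:

    import dpnp as np
    # with array-like endpoints, the num samples are laid out along `axis`
    y = np.geomspace([2, 3], [20, 15], num=10, axis=0)   # expected shape (10, 2)
    # a sequence base is broadcast against start/stop, one row per base value
    z = np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)  # expected shape (2, 4)
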
diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index fa02c127e67e..7c674d265dad 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -632,6 +632,11 @@ def test_linspace(start, stop, num, dtype, retstep): assert_allclose(res_np, res_dp, rtol=1e-06, atol=dpnp.finfo(dtype).eps) +@pytest.mark.parametrize( + "func", + ["geomspace", "linspace", "logspace"], + ids=["geomspace", "linspace", "logspace"], +) @pytest.mark.parametrize( "start_dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32], @@ -642,10 +647,10 @@ def test_linspace(start, stop, num, dtype, retstep): [numpy.float64, numpy.float32, numpy.int64, numpy.int32], ids=["float64", "float32", "int64", "int32"], ) -def test_linspace_dtype(start_dtype, stop_dtype): +def test_space_numpy_dtype(func, start_dtype, stop_dtype): start = numpy.array([1, 2, 3], dtype=start_dtype) stop = numpy.array([11, 7, -2], dtype=stop_dtype) - dpnp.linspace(start, stop, 10) + getattr(dpnp, func)(start, stop, 10) @pytest.mark.parametrize( From 1f6ee26ce8d002dd25099f12bbcd12db0ddda7fd Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Mon, 6 Nov 2023 16:07:11 -0800 Subject: [PATCH 10/38] Fix linspace docs --- dpnp/dpnp_iface_arraycreation.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 06e365238e19..51b9966194d6 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -1173,9 +1173,8 @@ def linspace( [`start`, `stop`] or the half-open interval [`start`, `stop`) (depending on whether `endpoint` is ``True`` or ``False``). step : float, optional - Only returned if `retstep` is ``True`` - - Size of spacing between samples. + Only returned if `retstep` is ``True``. + Size of spacing between samples. 
See Also -------- From db127d4c2ea48cd5fcf02b76f53c16afc77d4a08 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:19:25 -0600 Subject: [PATCH 11/38] implement dpnp.prod and dpnp.nanprod (#1613) * implement dpnp.prod and dpnp.nanprod * address comments * updates for nanprod input array * allow fall back on numpy - needed for Win tests --- dpnp/backend/include/dpnp_iface_fptr.hpp | 4 +- dpnp/backend/kernels/dpnp_krnl_reduction.cpp | 49 ----- dpnp/dpnp_algo/dpnp_algo.pxd | 2 - dpnp/dpnp_algo/dpnp_algo_mathematical.pxi | 84 -------- dpnp/dpnp_array.py | 4 +- dpnp/dpnp_iface_mathematical.py | 185 +++++++++++++----- tests/skipped_tests.tbl | 12 +- tests/skipped_tests_gpu.tbl | 66 ++----- tests/test_arithmetic.py | 4 +- tests/test_mathematical.py | 105 ++++++++++ tests/test_usm_type.py | 2 + .../cupy/math_tests/test_sumprod.py | 25 ++- 12 files changed, 279 insertions(+), 263 deletions(-) diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index 6f282e1e1f60..be64c6727f93 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -244,9 +244,7 @@ enum class DPNPFuncName : size_t DPNP_FN_PLACE, /**< Used in numpy.place() impl */ DPNP_FN_POWER, /**< Used in numpy.power() impl */ DPNP_FN_PROD, /**< Used in numpy.prod() impl */ - DPNP_FN_PROD_EXT, /**< Used in numpy.prod() impl, requires extra parameters - */ - DPNP_FN_PTP, /**< Used in numpy.ptp() impl */ + DPNP_FN_PTP, /**< Used in numpy.ptp() impl */ DPNP_FN_PTP_EXT, /**< Used in numpy.ptp() impl, requires extra parameters */ DPNP_FN_PUT, /**< Used in numpy.put() impl */ DPNP_FN_PUT_ALONG_AXIS, /**< Used in numpy.put_along_axis() impl */ diff --git a/dpnp/backend/kernels/dpnp_krnl_reduction.cpp b/dpnp/backend/kernels/dpnp_krnl_reduction.cpp index 439e10d34af1..d95343791025 100644 --- a/dpnp/backend/kernels/dpnp_krnl_reduction.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_reduction.cpp @@ -294,19 +294,6 @@ void (*dpnp_prod_default_c)(void *, const long *) = dpnp_prod_c<_DataType_output, _DataType_input>; -template -DPCTLSyclEventRef (*dpnp_prod_ext_c)(DPCTLSyclQueueRef, - void *, - const void *, - const shape_elem_type *, - const size_t, - const shape_elem_type *, - const size_t, - const void *, - const long *, - const DPCTLEventVectorRef) = - dpnp_prod_c<_DataType_output, _DataType_input>; - void func_map_init_reduction(func_map_t &fmap) { // WARNING. The meaning of the fmap is changed. 
Second argument represents @@ -349,42 +336,6 @@ void func_map_init_reduction(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_PROD][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_prod_default_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_INT][eft_INT] = { - eft_LNG, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_INT][eft_LNG] = { - eft_LNG, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_INT][eft_FLT] = { - eft_FLT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_INT][eft_DBL] = { - eft_DBL, (void *)dpnp_prod_ext_c}; - - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_LNG][eft_INT] = { - eft_INT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_LNG][eft_FLT] = { - eft_FLT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_LNG][eft_DBL] = { - eft_DBL, (void *)dpnp_prod_ext_c}; - - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_FLT][eft_INT] = { - eft_INT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_FLT][eft_LNG] = { - eft_LNG, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void *)dpnp_prod_ext_c}; - - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_DBL][eft_INT] = { - eft_INT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_DBL][eft_LNG] = { - eft_LNG, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_DBL][eft_FLT] = { - eft_FLT, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PROD_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_prod_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM][eft_INT][eft_INT] = { eft_LNG, (void *)dpnp_sum_default_c}; fmap[DPNPFuncName::DPNP_FN_SUM][eft_INT][eft_LNG] = { diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 7c0d00dd03d7..7a71531c72a7 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -126,8 +126,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_PARTITION DPNP_FN_PARTITION_EXT DPNP_FN_PLACE - DPNP_FN_PROD - DPNP_FN_PROD_EXT DPNP_FN_PTP DPNP_FN_PTP_EXT DPNP_FN_QR diff --git a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi index 21f7768cf2b0..f9828229b53a 100644 --- a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi @@ -50,9 +50,7 @@ __all__ += [ "dpnp_modf", "dpnp_nancumprod", "dpnp_nancumsum", - "dpnp_nanprod", "dpnp_nansum", - "dpnp_prod", "dpnp_sum", "dpnp_trapz", ] @@ -319,26 +317,6 @@ cpdef utils.dpnp_descriptor dpnp_nancumsum(utils.dpnp_descriptor x1): return dpnp_cumsum(x1_desc) -cpdef utils.dpnp_descriptor dpnp_nanprod(utils.dpnp_descriptor x1): - x1_obj = x1.get_array() - cdef utils.dpnp_descriptor result = utils_py.create_output_descriptor_py(x1.shape, - x1.dtype, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - for i in range(result.size): - input_elem = x1.get_pyobj().flat[i] - - if dpnp.isnan(input_elem): - result.get_pyobj().flat[i] = 1 - else: - result.get_pyobj().flat[i] = input_elem - - return dpnp_prod(result) - - cpdef utils.dpnp_descriptor dpnp_nansum(utils.dpnp_descriptor x1): x1_obj = x1.get_array() cdef utils.dpnp_descriptor result = utils_py.create_output_descriptor_py(x1.shape, @@ -359,68 +337,6 @@ cpdef 
utils.dpnp_descriptor dpnp_nansum(utils.dpnp_descriptor x1): return dpnp_sum(result) -cpdef utils.dpnp_descriptor dpnp_prod(utils.dpnp_descriptor x1, - object axis=None, - object dtype=None, - utils.dpnp_descriptor out=None, - cpp_bool keepdims=False, - object initial=None, - object where=True): - """ - input:float64 : output:float64 : name:prod - input:float32 : output:float32 : name:prod - input:int64 : output:int64 : name:prod - input:int32 : output:int64 : name:prod - input:bool : output:int64 : name:prod - input:complex64 : output:complex64 : name:prod - input:complex128: output:complex128: name:prod - """ - - cdef shape_type_c x1_shape = x1.shape - cdef DPNPFuncType x1_c_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - cdef shape_type_c axis_shape = utils._object_to_tuple(axis) - - cdef shape_type_c result_shape = utils.get_reduction_output_shape(x1_shape, axis, keepdims) - cdef DPNPFuncType result_c_type = utils.get_output_c_type(DPNP_FN_PROD_EXT, x1_c_type, out, dtype) - - """ select kernel """ - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_PROD_EXT, x1_c_type, result_c_type) - - x1_obj = x1.get_array() - - """ Create result array """ - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - result_c_type, - out, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - cdef dpnp_reduction_c_t func = kernel_data.ptr - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - """ Call FPTR interface function """ - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - result.get_data(), - x1.get_data(), - x1_shape.data(), - x1_shape.size(), - axis_shape.data(), - axis_shape.size(), - NULL, - NULL, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - cpdef utils.dpnp_descriptor dpnp_sum(utils.dpnp_descriptor x1, object axis=None, object dtype=None, diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index ce80de31be57..9e8a8096a0fe 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1026,9 +1026,7 @@ def prod( """ Returns the prod along a given axis. - .. seealso:: - :obj:`dpnp.prod` for full documentation, - :meth:`dpnp.dparray.sum` + For full documentation refer to :obj:`dpnp.prod`. """ diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 650c0478e173..416c8492fc93 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -41,6 +41,7 @@ import dpctl.tensor as dpt +import dpctl.utils as du import numpy from numpy.core.numeric import normalize_axis_tuple @@ -1750,34 +1751,73 @@ def nancumsum(x1, **kwargs): return call_origin(numpy.nancumsum, x1, **kwargs) -def nanprod(x1, **kwargs): +def nanprod( + a, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=None, + where=True, +): """ - Calculate prod() function treating 'Not a Numbers' (NaN) as ones. + Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. For full documentation refer to :obj:`numpy.nanprod`. + Returns + ------- + out : dpnp.ndarray + A new array holding the result is returned unless `out` is specified, in which case it is returned. + + See Also + -------- + :obj:`dpnp.prod` : Returns product across array propagating NaNs. + :obj:`dpnp.isnan` : Test element-wise for NaN and return result as a boolean array. 
+ Limitations ----------- - Parameter `x1` is supported as :obj:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. + Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + Parameters `initial`, and `where` are only supported with their default values. Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. Examples -------- >>> import dpnp as np - >>> np.nanprod(np.array([1, 2])) - 2 - >>> np.nanprod(np.array([[1, 2], [3, 4]])) - 24 - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - return dpnp_nanprod(x1_desc).get_pyobj() + >>> np.nanprod(np.array(1)) + array(1) + >>> np.nanprod(np.array([1])) + array(1) + >>> np.nanprod(np.array([1, np.nan])) + array(1.0) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + array(6.0) + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + + if dpnp.is_supported_array_or_scalar(a): + if issubclass(a.dtype.type, dpnp.inexact): + mask = dpnp.isnan(a) + a = dpnp.array(a, copy=True) + dpnp.copyto(a, 1, where=mask) + else: + raise TypeError( + "An array must be any of supported type, but got {}".format(type(a)) + ) - return call_origin(numpy.nanprod, x1, **kwargs) + return dpnp.prod( + a, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) def nansum(x1, **kwargs): @@ -2030,7 +2070,7 @@ def power( def prod( - x1, + a, axis=None, dtype=None, out=None, @@ -2039,54 +2079,105 @@ def prod( where=True, ): """ - Calculate product of array elements over a given axis. + Return the product of array elements over a given axis. For full documentation refer to :obj:`numpy.prod`. + Returns + ------- + out : dpnp.ndarray + A new array holding the result is returned unless `out` is specified, in which case it is returned. + Limitations ----------- - Parameter `where` is unsupported. + Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + Parameters `initial`, and `where` are only supported with their default values. + Otherwise the function will be executed sequentially on CPU. Input array data types are limited by DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.nanprod` : Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. 
+ Examples -------- >>> import dpnp as np - >>> np.prod(np.array([[1, 2], [3, 4]])) - 24 >>> np.prod(np.array([1, 2])) - 2 + array(2) + + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.prod(a) + array(24) + + >>> np.prod(a, axis=1) + array([ 2, 12]) + >>> np.prod(a, axis=0) + array([3, 8]) + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if where is not True: - pass - else: - out_desc = ( - dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) - if out is not None - else None - ) - result_obj = dpnp_prod( - x1_desc, axis, dtype, out_desc, keepdims, initial, where - ).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar( - result_obj, keepdims - ) + # Product reduction for complex output are known to fail for Gen9 with 2024.0 compiler + # TODO: get rid of this temporary work around when OneAPI 2024.1 is released + if not isinstance(a, (dpnp_array, dpt.usm_ndarray)): + raise TypeError( + "An array must be any of supported type, but got {}".format(type(a)) + ) + _dtypes = (a.dtype, dtype) + _any_complex = any( + dpnp.issubdtype(dt, dpnp.complexfloating) for dt in _dtypes + ) + device_mask = ( + du.intel_device_info(a.sycl_device).get("device_id", 0) & 0xFF00 + ) + if _any_complex and device_mask in [0x3E00, 0x9B00]: + return call_origin( + numpy.prod, + a, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) + elif initial is not None: + raise NotImplementedError( + "initial keyword argument is only supported by its default value." + ) + elif where is not True: + raise NotImplementedError( + "where keyword argument is only supported by its default value." + ) + else: + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.prod(dpt_array, axis=axis, dtype=dtype, keepdims=keepdims) + ) + if out is None: return result + else: + if out.shape != result.shape: + raise ValueError( + f"Output array of shape {result.shape} is needed, got {out.shape}." 
+ ) + elif not isinstance(out, dpnp_array): + if isinstance(out, dpt.usm_ndarray): + out = dpnp_array._create_from_usm_ndarray(out) + else: + raise TypeError( + "Output array must be any of supported type, but got {}".format( + type(out) + ) + ) - return call_origin( - numpy.prod, - x1, - axis=axis, - dtype=dtype, - out=out, - keepdims=keepdims, - initial=initial, - where=where, - ) + dpnp.copyto(out, result, casting="safe") + + return out def proj( diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 60b9408b6909..801b29fdf3c5 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -11,9 +11,6 @@ tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: (dpnp tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray([(i, i) for i in x], [("a", object), ("b", dpnp.int32)])]] tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray(x).astype(dpnp.int8)] -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-trapz-data19] -tests/test_sycl_queue.py::test_1in_1out[opencl:cpu:0-trapz-data19] - tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_1_{axes=None, norm=None, s=(1, None), shape=(3, 4)}::test_fft2 tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_7_{axes=(), norm=None, s=None, shape=(3, 4)}::test_fft2 tests/third_party/cupy/fft_tests/test_fft.py::TestFft2_param_7_{axes=(), norm=None, s=None, shape=(3, 4)}::test_ifft2 @@ -619,14 +616,7 @@ tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_2 tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_3_{axis=(0, 2, 3), shape=(20, 30, 40, 50)}::test_nansum_axes tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_0_{shape=(2, 3, 4)}::test_nansum_out tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_1_{shape=(20, 30, 40)}::test_nansum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_11_{axis=0, func='nanprod', keepdims=True, shape=(20, 30, 40), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_11_{axis=0, func='nanprod', keepdims=True, shape=(20, 30, 40), transpose_axes=False}::test_nansum_axis_transposed -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_13_{axis=0, func='nanprod', keepdims=False, shape=(2, 3, 4), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_13_{axis=0, func='nanprod', keepdims=False, shape=(2, 3, 4), transpose_axes=False}::test_nansum_axis_transposed -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_15_{axis=0, func='nanprod', keepdims=False, shape=(20, 30, 40), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_15_{axis=0, func='nanprod', keepdims=False, shape=(20, 30, 40), transpose_axes=False}::test_nansum_axis_transposed -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_9_{axis=0, func='nanprod', keepdims=True, shape=(2, 3, 4), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_9_{axis=0, func='nanprod', keepdims=True, shape=(2, 3, 4), transpose_axes=False}::test_nansum_axis_transposed + tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim 
tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_discont tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_period diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index fc9931e93d67..446ca789f258 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -17,30 +17,6 @@ tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: (dpnp tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray([(i, i) for i in x], [("a", object), ("b", dpnp.int32)])]] tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray(x).astype(dpnp.int8)] -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-copy-data3] -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-cumprod-data4] -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-cumsum-data5] -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-ediff1d-data7] -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-fabs-data8] - -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-copy-data3] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-cumprod-data4] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-cumsum-data5] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-ediff1d-data7] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-fabs-data8] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-nancumprod-data11] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-nancumsum-data12] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-nanprod-data13] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-nansum-data14] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-prod-data16] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-sum-data18] -tests/test_sycl_queue.py::test_1in_1out[level_zero:gpu:0-trapz-data19] - -tests/test_sycl_queue.py::test_modf[level_zero:gpu:0] - -tests/test_sycl_queue.py::test_1in_1out[opencl:gpu:0-trapz-data19] -tests/test_sycl_queue.py::test_1in_1out[opencl:cpu:0-trapz-data19] - tests/test_umath.py::test_umaths[('divmod', 'ii')] tests/test_umath.py::test_umaths[('divmod', 'll')] tests/test_umath.py::test_umaths[('divmod', 'ff')] @@ -83,20 +59,6 @@ tests/third_party/cupy/indexing_tests/test_insert.py::TestDiagIndicesFrom_param_ tests/third_party/cupy/indexing_tests/test_insert.py::TestDiagIndicesFrom_param_1_{shape=(0, 0)}::test_diag_indices_from tests/third_party/cupy/indexing_tests/test_insert.py::TestDiagIndicesFrom_param_2_{shape=(2, 2, 2)}::test_diag_indices_from -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_external_prod_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_external_prod_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_prod_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_prod_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_1dim -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_2dim_without_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_2dim 
-tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_2dim -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_2dim tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_2_{p_shape=(3, 2), shape=(4, 3, 2)}::test_geometric tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_3_{p_shape=(3, 2), shape=(3, 2)}::test_geometric tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_0_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric @@ -197,13 +159,6 @@ tests/third_party/cupy/linalg_tests/test_einsum.py::TestListArgEinSumError::test tests/third_party/cupy/linalg_tests/test_product.py::TestProduct::test_reversed_vdot -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_out_noncontiguous -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_axis_out_noncontiguous -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_out_noncontiguous -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_axis_out_noncontiguous -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_out_noncontiguous -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_axis_out_noncontiguous -tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_out_noncontiguous tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_0_{d=2, shape=(4, 3, 2)}::test_normal tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_1_{d=2, shape=(3, 2)}::test_normal tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_2_{d=4, shape=(4, 3, 2)}::test_normal @@ -273,6 +228,7 @@ tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_pa tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_6_{order='F', shape=(10, 20, 30)}::test_cub_min tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_7_{order='F', shape=(10, 20, 30, 40)}::test_cub_max tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_7_{order='F', shape=(10, 20, 30, 40)}::test_cub_min + tests/third_party/cupy/creation_tests/test_basic.py::TestBasicReshape_param_0_{shape=4}::test_empty_like_K_strides_reshape tests/third_party/cupy/creation_tests/test_basic.py::TestBasicReshape_param_1_{shape=(4,)}::test_empty_like_K_strides_reshape tests/third_party/cupy/creation_tests/test_basic.py::TestBasicReshape_param_2_{shape=(4, 2)}::test_empty_like_K_strides_reshape @@ -729,6 +685,8 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff_types[full] tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix +tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out 
+tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_1dim tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_1dim_with_n @@ -736,6 +694,15 @@ tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_huge_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_numpy_array +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_out_noncontiguous +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_1dim +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_2dim_without_axis +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_2dim +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_2dim +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_2dim tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_numpy_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_arraylike @@ -752,14 +719,7 @@ tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_2 tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_3_{axis=(0, 2, 3), shape=(20, 30, 40, 50)}::test_nansum_axes tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_0_{shape=(2, 3, 4)}::test_nansum_out tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_1_{shape=(20, 30, 40)}::test_nansum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_11_{axis=0, func='nanprod', keepdims=True, shape=(20, 30, 40), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_11_{axis=0, func='nanprod', keepdims=True, shape=(20, 30, 40), transpose_axes=False}::test_nansum_axis_transposed -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_13_{axis=0, func='nanprod', keepdims=False, shape=(2, 3, 4), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_13_{axis=0, func='nanprod', keepdims=False, shape=(2, 3, 4), transpose_axes=False}::test_nansum_axis_transposed -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_15_{axis=0, func='nanprod', keepdims=False, shape=(20, 30, 40), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_15_{axis=0, func='nanprod', keepdims=False, shape=(20, 30, 40), transpose_axes=False}::test_nansum_axis_transposed 
-tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_9_{axis=0, func='nanprod', keepdims=True, shape=(2, 3, 4), transpose_axes=False}::test_nansum_all -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodLong_param_9_{axis=0, func='nanprod', keepdims=True, shape=(2, 3, 4), transpose_axes=False}::test_nansum_axis_transposed + tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_discont tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_period diff --git a/tests/test_arithmetic.py b/tests/test_arithmetic.py index 9d138e0d8439..60dc7a1c9af0 100644 --- a/tests/test_arithmetic.py +++ b/tests/test_arithmetic.py @@ -22,14 +22,12 @@ def test_modf_part2(self, xp, dtype): return c - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_float_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_nanprod(self, xp, dtype): a = xp.array([-2.5, -1.5, xp.nan, 10.5, 1.5, xp.nan], dtype=dtype) return xp.nanprod(a) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_nansum(self, xp, dtype): diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 2a25e7de573a..63d5172c08ed 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1,5 +1,6 @@ from itertools import permutations +import dpctl.tensor as dpt import numpy import pytest from numpy.testing import ( @@ -16,6 +17,7 @@ assert_dtype_allclose, get_all_dtypes, get_complex_dtypes, + get_float_complex_dtypes, get_float_dtypes, has_support_aspect64, is_cpu_device, @@ -459,6 +461,109 @@ def test_positive_boolean(): dpnp.positive(dpnp_a) +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +@pytest.mark.parametrize("func", ["prod", "nanprod"]) +@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) +@pytest.mark.parametrize("keepdims", [False, True]) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) +def test_prod_nanprod(func, axis, keepdims, dtype): + a = numpy.arange(1, 13, dtype=dtype).reshape((2, 2, 3)) + if func == "nanprod" and issubclass(a.dtype.type, dpnp.inexact): + a[1:2:] = numpy.nan + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) +def test_prod_zero_size(axis): + a = numpy.empty((2, 3, 0)) + ia = dpnp.array(a) + + np_res = numpy.prod(a, axis=axis) + dpnp_res = dpnp.prod(ia, axis=axis) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["prod", "nanprod"]) +@pytest.mark.parametrize("axis", [None, 0, 1, -1]) +@pytest.mark.parametrize("keepdims", [False, True]) +def test_prod_nanprod_bool(func, axis, keepdims): + a = numpy.arange(2, dtype=dpnp.bool) + a = numpy.tile(a, (2, 2)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +@pytest.mark.usefixtures("suppress_complex_warning") 
+@pytest.mark.usefixtures("suppress_invalid_numpy_warnings") +@pytest.mark.parametrize("func", ["prod", "nanprod"]) +@pytest.mark.parametrize("in_dtype", get_all_dtypes(no_bool=True)) +@pytest.mark.parametrize("out_dtype", get_all_dtypes(no_bool=True)) +def test_prod_nanprod_dtype(func, in_dtype, out_dtype): + a = numpy.arange(1, 13, dtype=in_dtype).reshape((2, 2, 3)) + if func == "nanprod" and issubclass(a.dtype.type, dpnp.inexact): + a[1:2:] = numpy.nan + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, dtype=out_dtype) + dpnp_res = getattr(dpnp, func)(ia, dtype=out_dtype) + + if out_dtype is not None: + assert dpnp_res.dtype == out_dtype + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["prod", "nanprod"]) +def test_prod_nanprod_out(func): + a = numpy.arange(1, 7).reshape((2, 3)) + if func == "nanprod" and issubclass(a.dtype.type, dpnp.inexact): + a[1:2:] = numpy.nan + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = dpnp.array(numpy.empty_like(np_res)) + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + assert_allclose(dpnp_res, np_res) + + dpnp_res = dpt.asarray(numpy.empty_like(np_res)) + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + assert_allclose(dpnp_res, np_res) + + dpnp_res = numpy.empty_like(np_res) + with pytest.raises(TypeError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + + dpnp_res = dpnp.array(numpy.empty((2, 3))) + with pytest.raises(ValueError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + + +def test_prod_nanprod_Error(): + ia = dpnp.arange(5) + + with pytest.raises(TypeError): + dpnp.prod(dpnp.asnumpy(ia)) + with pytest.raises(TypeError): + dpnp.nanprod(dpnp.asnumpy(ia)) + with pytest.raises(NotImplementedError): + dpnp.prod(ia, where=False) + with pytest.raises(NotImplementedError): + dpnp.prod(ia, initial=6) + + @pytest.mark.parametrize( "data", [[2, 0, -2], [1.1, -1.1]], diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index fd94ff01fdb7..206cae643269 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -349,8 +349,10 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("log10", [1.0, 2.0, 4.0, 7.0]), pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), + pytest.param("nanprod", [1.0, 2.0, dp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), + pytest.param("prod", [1.0, 2.0]), pytest.param("proj", [complex(1.0, 2.0), complex(dp.inf, -1.0)]), pytest.param( "real", [complex(1.0, 2.0), complex(3.0, 4.0), complex(5.0, 6.0)] diff --git a/tests/third_party/cupy/math_tests/test_sumprod.py b/tests/third_party/cupy/math_tests/test_sumprod.py index e6b7adfa5c58..5834ac94fe2f 100644 --- a/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/tests/third_party/cupy/math_tests/test_sumprod.py @@ -214,25 +214,25 @@ def test_sum_out_wrong_shape(self): a.sum(axis=1, out=b) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_prod_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.prod() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_external_prod_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.prod(a) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_prod_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return 
a.prod(axis=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_external_prod_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.prod(a, axis=1) @@ -267,6 +267,17 @@ def _numpy_nanprod_implemented(self): ) def _test(self, xp, dtype): + if ( + self.func == "nanprod" + and self.shape == (20, 30, 40) + and has_support_aspect64() + ): + # If input type is float, NumPy returns the same data type but + # dpctl (and dpnp) returns default platform float following array api. + # When input is `float32` and output is a very large number, dpnp returns + # the number because it is `float64` but NumPy returns `inf` since it is `float32`. + pytest.skip("Output is a very large number.") + a = testing.shaped_arange(self.shape, xp, dtype) if self.transpose_axes: a = a.transpose(2, 0, 1) @@ -276,7 +287,7 @@ def _test(self, xp, dtype): return func(a, axis=self.axis, keepdims=self.keepdims) @testing.for_all_dtypes(no_bool=True, no_float16=True) - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(type_check=False) def test_nansum_all(self, xp, dtype): if ( not self._numpy_nanprod_implemented() @@ -286,9 +297,7 @@ def test_nansum_all(self, xp, dtype): return self._test(xp, dtype) @testing.for_all_dtypes(no_bool=True, no_float16=True) - @testing.numpy_cupy_allclose( - contiguous_check=False, type_check=has_support_aspect64() - ) + @testing.numpy_cupy_allclose(contiguous_check=False, type_check=False) def test_nansum_axis_transposed(self, xp, dtype): if ( not self._numpy_nanprod_implemented() From 6c94a9d3f330647e38edecd435ef4ce0c794a501 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Tue, 7 Nov 2023 12:40:44 -0800 Subject: [PATCH 12/38] Update dpnp/dpnp_iface_arraycreation.py Co-authored-by: Anton <100830759+antonwolfy@users.noreply.github.com> --- dpnp/dpnp_iface_arraycreation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 51b9966194d6..0ed1187cb1d8 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -1260,7 +1260,7 @@ def logspace( Returns ------- - samples : dpnp.ndarray + out: dpnp.ndarray num samples, equally spaced on a log scale. 
See Also From 79cb518a7fc8e4cd04ec6a84b67c51fa4a77e24b Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Tue, 7 Nov 2023 22:38:38 -0600 Subject: [PATCH 13/38] implement dpnp.max and dpnp.min using dpctl.tensor functions (#1602) * implement dpnp.max and dpnp.min using dpctl.tensor functions * address comments * fix a few issues * fix doc-string * add axis==None condition for zero-size array * add new tests to improve coverage * update tests to reduce duplication --- .github/workflows/conda-package.yml | 1 + dpnp/backend/include/dpnp_iface_fptr.hpp | 26 +- dpnp/backend/kernels/dpnp_krnl_statistics.cpp | 42 ---- dpnp/dpnp_algo/dpnp_algo.pxd | 10 - dpnp/dpnp_algo/dpnp_algo_statistics.pxi | 171 ------------- dpnp/dpnp_algo/dpnp_elementwise_common.py | 2 - dpnp/dpnp_array.py | 25 +- dpnp/dpnp_iface_mathematical.py | 12 +- dpnp/dpnp_iface_statistics.py | 233 ++++++++++++------ tests/skipped_tests.tbl | 5 - tests/test_amin_amax.py | 10 +- tests/test_statistics.py | 65 ++++- tests/test_sycl_queue.py | 2 + tests/test_usm_type.py | 2 + .../cupy/core_tests/test_ndarray_reduction.py | 59 +++++ 15 files changed, 318 insertions(+), 347 deletions(-) diff --git a/.github/workflows/conda-package.yml b/.github/workflows/conda-package.yml index 76b24b44287f..472c0d6be9a3 100644 --- a/.github/workflows/conda-package.yml +++ b/.github/workflows/conda-package.yml @@ -13,6 +13,7 @@ env: # TODO: to add test_arraymanipulation.py back to the scope once crash on Windows is gone TEST_SCOPE: >- test_arraycreation.py + test_amin_amax.py test_dot.py test_dparray.py test_copy.py diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index be64c6727f93..7d8b195935c6 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -212,20 +212,18 @@ enum class DPNPFuncName : size_t DPNP_FN_MATRIX_RANK_EXT, /**< Used in numpy.linalg.matrix_rank() impl, requires extra parameters */ DPNP_FN_MAX, /**< Used in numpy.max() impl */ - DPNP_FN_MAX_EXT, /**< Used in numpy.max() impl, requires extra parameters */ - DPNP_FN_MAXIMUM, /**< Used in numpy.fmax() impl */ - DPNP_FN_MAXIMUM_EXT, /**< Used in numpy.fmax() impl , requires extra - parameters */ - DPNP_FN_MEAN, /**< Used in numpy.mean() impl */ - DPNP_FN_MEDIAN, /**< Used in numpy.median() impl */ - DPNP_FN_MEDIAN_EXT, /**< Used in numpy.median() impl, requires extra - parameters */ - DPNP_FN_MIN, /**< Used in numpy.min() impl */ - DPNP_FN_MIN_EXT, /**< Used in numpy.min() impl, requires extra parameters */ - DPNP_FN_MINIMUM, /**< Used in numpy.fmin() impl */ - DPNP_FN_MINIMUM_EXT, /**< Used in numpy.fmax() impl, requires extra - parameters */ - DPNP_FN_MODF, /**< Used in numpy.modf() impl */ + DPNP_FN_MAXIMUM, /**< Used in numpy.fmax() impl */ + DPNP_FN_MAXIMUM_EXT, /**< Used in numpy.fmax() impl , requires extra + parameters */ + DPNP_FN_MEAN, /**< Used in numpy.mean() impl */ + DPNP_FN_MEDIAN, /**< Used in numpy.median() impl */ + DPNP_FN_MEDIAN_EXT, /**< Used in numpy.median() impl, requires extra + parameters */ + DPNP_FN_MIN, /**< Used in numpy.min() impl */ + DPNP_FN_MINIMUM, /**< Used in numpy.fmin() impl */ + DPNP_FN_MINIMUM_EXT, /**< Used in numpy.fmax() impl, requires extra + parameters */ + DPNP_FN_MODF, /**< Used in numpy.modf() impl */ DPNP_FN_MODF_EXT, /**< Used in numpy.modf() impl, requires extra parameters */ DPNP_FN_MULTIPLY, /**< Used in numpy.multiply() impl */ diff --git a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp 
b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp index 3acf53f0de4f..5c0ca1f6591b 100644 --- a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp @@ -503,18 +503,6 @@ void (*dpnp_max_default_c)(void *, const shape_elem_type *, size_t) = dpnp_max_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_max_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const size_t, - const shape_elem_type *, - size_t, - const shape_elem_type *, - size_t, - const DPCTLEventVectorRef) = - dpnp_max_c<_DataType>; - template DPCTLSyclEventRef dpnp_mean_c(DPCTLSyclQueueRef q_ref, void *array1_in, @@ -887,18 +875,6 @@ void (*dpnp_min_default_c)(void *, const shape_elem_type *, size_t) = dpnp_min_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_min_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const size_t, - const shape_elem_type *, - size_t, - const shape_elem_type *, - size_t, - const DPCTLEventVectorRef) = - dpnp_min_c<_DataType>; - template DPCTLSyclEventRef dpnp_nanvar_c(DPCTLSyclQueueRef q_ref, void *array1_in, @@ -1283,15 +1259,6 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_MAX][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_max_default_c}; - fmap[DPNPFuncName::DPNP_FN_MAX_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_max_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MAX_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_max_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MAX_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_max_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MAX_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_max_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MEAN][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_mean_default_c}; fmap[DPNPFuncName::DPNP_FN_MEAN][eft_LNG][eft_LNG] = { @@ -1340,15 +1307,6 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_MIN][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_min_default_c}; - fmap[DPNPFuncName::DPNP_FN_MIN_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_min_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MIN_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_min_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MIN_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_min_ext_c}; - fmap[DPNPFuncName::DPNP_FN_MIN_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_min_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR][eft_INT][eft_INT] = { eft_INT, (void *)dpnp_nanvar_default_c}; fmap[DPNPFuncName::DPNP_FN_NANVAR][eft_LNG][eft_LNG] = { diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 7a71531c72a7..c2e4747fbdb7 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -106,14 +106,10 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_MATMUL_EXT DPNP_FN_MATRIX_RANK DPNP_FN_MATRIX_RANK_EXT - DPNP_FN_MAX - DPNP_FN_MAX_EXT DPNP_FN_MAXIMUM DPNP_FN_MAXIMUM_EXT DPNP_FN_MEDIAN DPNP_FN_MEDIAN_EXT - DPNP_FN_MIN - DPNP_FN_MIN_EXT DPNP_FN_MINIMUM DPNP_FN_MINIMUM_EXT DPNP_FN_MODF @@ -369,12 +365,6 @@ Array manipulation routines cpdef dpnp_descriptor dpnp_repeat(dpnp_descriptor array1, repeats, axes=*) -""" -Statistics functions -""" -cpdef dpnp_descriptor dpnp_min(dpnp_descriptor a, axis) - - """ Sorting functions """ diff --git a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi index 43463c7791d3..34e0684fcbfc 100644 --- a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi @@ -38,9 +38,7 @@ and the rest of the library __all__ += [ "dpnp_average", "dpnp_correlate", - 
"dpnp_max", "dpnp_median", - "dpnp_min", "dpnp_nanvar", "dpnp_std", "dpnp_var", @@ -64,16 +62,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*custom_statistic_1in_1out_func_ptr_t)(c_dpct void *, void * , shape_elem_type * , size_t, shape_elem_type * , size_t, const c_dpctl.DPCTLEventVectorRef) -ctypedef c_dpctl.DPCTLSyclEventRef(*custom_statistic_1in_1out_func_ptr_t_max)(c_dpctl.DPCTLSyclQueueRef, - void *, - void * , - const size_t, - shape_elem_type * , - size_t, - shape_elem_type * , - size_t, - const c_dpctl.DPCTLEventVectorRef) - cdef utils.dpnp_descriptor call_fptr_custom_std_var_1in_1out(DPNPFuncName fptr_name, utils.dpnp_descriptor x1, ddof): cdef shape_type_c x1_shape = x1.shape @@ -177,86 +165,6 @@ cpdef utils.dpnp_descriptor dpnp_correlate(utils.dpnp_descriptor x1, utils.dpnp_ return result -cdef utils.dpnp_descriptor _dpnp_max(utils.dpnp_descriptor x1, _axis_, shape_type_c result_shape): - cdef shape_type_c x1_shape = x1.shape - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_MAX_EXT, param1_type, param1_type) - - x1_obj = x1.get_array() - - # create result array with type given by FPTR data - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_statistic_1in_1out_func_ptr_t_max func = kernel_data.ptr - cdef shape_type_c axis - cdef Py_ssize_t axis_size = 0 - cdef shape_type_c axis_ = axis - - if _axis_ is not None: - axis = _axis_ - axis_.reserve(len(axis)) - for shape_it in axis: - axis_.push_back(shape_it) - axis_size = len(axis) - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - x1.get_data(), - result.get_data(), - result.size, - x1_shape.data(), - x1.ndim, - axis_.data(), - axis_size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - -cpdef utils.dpnp_descriptor dpnp_max(utils.dpnp_descriptor x1, axis): - cdef shape_type_c x1_shape = x1.shape - cdef shape_type_c output_shape - - if axis is None: - axis_ = axis - output_shape.push_back(1) - else: - if isinstance(axis, int): - if axis < 0: - axis_ = tuple([x1.ndim - axis]) - else: - axis_ = tuple([axis]) - else: - _axis_ = [] - for i in range(len(axis)): - if axis[i] < 0: - _axis_.append(x1.ndim - axis[i]) - else: - _axis_.append(axis[i]) - axis_ = tuple(_axis_) - - output_shape.resize(len(x1_shape) - len(axis_), 0) - ind = 0 - for id, shape_axis in enumerate(x1_shape): - if id not in axis_: - output_shape[ind] = shape_axis - ind += 1 - - return _dpnp_max(x1, axis_, output_shape) - cpdef utils.dpnp_descriptor dpnp_median(utils.dpnp_descriptor array1): cdef shape_type_c x1_shape = array1.shape cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(array1.dtype) @@ -301,85 +209,6 @@ cpdef utils.dpnp_descriptor dpnp_median(utils.dpnp_descriptor array1): return result -cpdef utils.dpnp_descriptor _dpnp_min(utils.dpnp_descriptor x1, _axis_, shape_type_c shape_output): - cdef shape_type_c x1_shape = x1.shape - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_MIN_EXT, param1_type, param1_type) - - x1_obj = x1.get_array() - - cdef 
utils.dpnp_descriptor result = utils.create_output_descriptor(shape_output, - kernel_data.return_type, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_statistic_1in_1out_func_ptr_t_max func = kernel_data.ptr - cdef shape_type_c axis - cdef Py_ssize_t axis_size = 0 - cdef shape_type_c axis_ = axis - - if _axis_ is not None: - axis = _axis_ - axis_.reserve(len(axis)) - for shape_it in axis: - if shape_it < 0: - raise ValueError("DPNP algo::_dpnp_min(): Negative values in 'shape' are not allowed") - axis_.push_back(shape_it) - axis_size = len(axis) - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - x1.get_data(), - result.get_data(), - result.size, - x1_shape.data(), - x1.ndim, - axis_.data(), - axis_size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - -cpdef utils.dpnp_descriptor dpnp_min(utils.dpnp_descriptor x1, axis): - cdef shape_type_c x1_shape = x1.shape - cdef shape_type_c shape_output - - if axis is None: - axis_ = axis - shape_output = (1,) - else: - if isinstance(axis, int): - if axis < 0: - axis_ = tuple([x1.ndim - axis]) - else: - axis_ = tuple([axis]) - else: - _axis_ = [] - for i in range(len(axis)): - if axis[i] < 0: - _axis_.append(x1.ndim - axis[i]) - else: - _axis_.append(axis[i]) - axis_ = tuple(_axis_) - - for id, shape_axis in enumerate(x1_shape): - if id not in axis_: - shape_output.push_back(shape_axis) - - return _dpnp_min(x1, axis_, shape_output) - - cpdef utils.dpnp_descriptor dpnp_nanvar(utils.dpnp_descriptor arr, ddof): # dpnp_isnan does not support USM array as input in comparison to dpnp.isnan cdef utils.dpnp_descriptor mask_arr = dpnp.get_dpnp_descriptor(dpnp.isnan(arr.get_pyobj()), diff --git a/dpnp/dpnp_algo/dpnp_elementwise_common.py b/dpnp/dpnp_algo/dpnp_elementwise_common.py index 39b2199e914e..315b266c8032 100644 --- a/dpnp/dpnp_algo/dpnp_elementwise_common.py +++ b/dpnp/dpnp_algo/dpnp_elementwise_common.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2023, Intel Corporation diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index 9e8a8096a0fe..fb454a786424 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -25,7 +25,6 @@ # ***************************************************************************** import dpctl.tensor as dpt -import numpy import dpnp @@ -939,11 +938,15 @@ def max( self, axis=None, out=None, - keepdims=numpy._NoValue, - initial=numpy._NoValue, - where=numpy._NoValue, + keepdims=False, + initial=None, + where=True, ): - """Return the maximum along an axis.""" + """ + Return the maximum along an axis. + + Refer to :obj:`dpnp.max` for full documentation. + """ return dpnp.max(self, axis, out, keepdims, initial, where) @@ -956,11 +959,15 @@ def min( self, axis=None, out=None, - keepdims=numpy._NoValue, - initial=numpy._NoValue, - where=numpy._NoValue, + keepdims=False, + initial=None, + where=True, ): - """Return the minimum along a given axis.""" + """ + Return the minimum along a given axis. + + Refer to :obj:`dpnp.min` for full documentation. 
+ """ return dpnp.min(self, axis, out, keepdims, initial, where) diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 416c8492fc93..330179c2ca44 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -1401,9 +1401,9 @@ def maximum( :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. :obj:`dpnp.amax` : The maximum value of an array along a given axis, propagates NaNs. :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. - :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. - :obj:`dpnp.amix` : The minimum value of an array along a given axis, propagates NaNs. - :obj:`dpnp.nanmix` : The minimum value of an array along a given axis, ignores NaNs. + :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. + :obj:`dpnp.amax` : The maximum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. Examples -------- @@ -1480,9 +1480,9 @@ def minimum( :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. :obj:`dpnp.amin` : The minimum value of an array along a given axis, propagates NaNs. :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. - :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. - :obj:`dpnp.amax` : The maximum value of an array along a given axis, propagates NaNs. - :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. + :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. + :obj:`dpnp.amin` : The minimum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. Examples -------- diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index c7254ad6d01f..653b323c9e1d 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -352,69 +350,102 @@ def histogram(a, bins=10, range=None, density=None, weights=None): ) -def max(x1, axis=None, out=None, keepdims=False, initial=None, where=True): +def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ Return the maximum of an array or maximum along an axis. + For full documentation refer to :obj:`numpy.max`. + + Returns + ------- + out : dpnp.ndarray + Maximum of `a`. + Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Otherwise the function will be executed sequentially on CPU. - Parameter `out` is supported only with default value ``None``. + Input and output arrays are only supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Parameters `where`, and `initial` are supported only with their default values. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.min` : Return the minimum of an array. + :obj:`dpnp.maximum` : Element-wise maximum of two arrays, propagates NaNs. + :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. + :obj:`dpnp.amax` : The maximum value of an array along a given axis, propagates NaNs. 
+ :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. + Examples -------- >>> import dpnp as np >>> a = np.arange(4).reshape((2,2)) - >>> a.shape - (2, 2) - >>> [i for i in a] - [0, 1, 2, 3] + >>> a + array([[0, 1], + [2, 3]]) >>> np.max(a) - 3 + array(3) + + >>> np.max(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.max(a, axis=1) # Maxima along the second axis + array([1, 3]) + + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.max(b) + array(nan) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - # Negative values in 'shape' are not allowed in input array - # 306-322 check on negative and duplicate axis - isaxis = True - if axis is not None: - if dpnp.isscalar(axis): - if axis < 0: - isaxis = False - else: - for val in axis: - if val < 0: - isaxis = False - break - if isaxis: - for i in range(len(axis)): - for j in range(len(axis)): - if i != j: - if axis[i] == axis[j]: - isaxis = False - break - - if not isaxis: - pass - elif out is not None: - pass - elif keepdims: - pass - elif initial is not None: - pass - elif where is not True: - pass + if initial is not None: + raise NotImplementedError( + "initial keyword argument is only supported by its default value." + ) + elif where is not True: + raise NotImplementedError( + "where keyword argument is only supported by its default value." + ) + else: + dpt_array = dpnp.get_usm_ndarray(a) + if dpt_array.size == 0: + # TODO: get rid of this if condition when dpctl supports it + axis = (axis,) if isinstance(axis, int) else axis + for i in range(a.ndim): + if a.shape[i] == 0: + if axis is None or i in axis: + raise ValueError( + "reduction does not support zero-size arrays" + ) + else: + indices = [i for i in range(a.ndim) if i not in axis] + res_shape = tuple([a.shape[i] for i in indices]) + result = dpnp.empty(res_shape, dtype=a.dtype) else: - result_obj = dpnp_max(x1_desc, axis).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - + result = dpnp_array._create_from_usm_ndarray( + dpt.max(dpt_array, axis=axis, keepdims=keepdims) + ) + if out is None: return result + else: + if out.shape != result.shape: + raise ValueError( + f"Output array of shape {result.shape} is needed, got {out.shape}." + ) + elif not isinstance(out, dpnp_array): + if isinstance(out, dpt.usm_ndarray): + out = dpnp_array._create_from_usm_ndarray(out) + else: + raise TypeError( + "Output array must be any of supported type, but got {}".format( + type(out) + ) + ) + + dpnp.copyto(out, result, casting="safe") - return call_origin(numpy.max, x1, axis, out, keepdims, initial, where) + return out def mean(x, /, *, axis=None, dtype=None, keepdims=False, out=None, where=True): @@ -564,47 +595,101 @@ def median(x1, axis=None, out=None, overwrite_input=False, keepdims=False): return call_origin(numpy.median, x1, axis, out, overwrite_input, keepdims) -def min(x1, axis=None, out=None, keepdims=False, initial=None, where=True): +def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ - Return the minimum along a given axis. + Return the minimum of an array or maximum along an axis. + + For full documentation refer to :obj:`numpy.min`. + + Returns + ------- + out : dpnp.ndarray + Minimum of `a`. Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Otherwise the function will be executed sequentially on CPU. - Parameter `out` is supported only with default value ``None``. 
+ Input and output arrays are only supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Parameters `where`, and `initial` are supported only with their default values. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.max` : Return the maximum of an array. + :obj:`dpnp.minimum` : Element-wise minimum of two arrays, propagates NaNs. + :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. + :obj:`dpnp.amin` : The minimum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. + Examples -------- >>> import dpnp as np >>> a = np.arange(4).reshape((2,2)) - >>> a.shape - (2, 2) - >>> [i for i in a] - [0, 1, 2, 3] + >>> a + array([[0, 1], + [2, 3]]) >>> np.min(a) - 0 + array(0) + + >>> np.min(a, axis=0) # Minima along the first axis + array([0, 1]) + >>> np.min(a, axis=1) # Minima along the second axis + array([0, 2]) + + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.min(b) + array(nan) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if out is not None: - pass - elif keepdims: - pass - elif initial is not None: - pass - elif where is not True: - pass + if initial is not None: + raise NotImplementedError( + "initial keyword argument is only supported by its default value." + ) + elif where is not True: + raise NotImplementedError( + "where keyword argument is only supported by its default values." + ) + else: + dpt_array = dpnp.get_usm_ndarray(a) + if dpt_array.size == 0: + # TODO: get rid of this if condition when dpctl supports it + for i in range(a.ndim): + if a.shape[i] == 0: + if axis is None or i in axis: + raise ValueError( + "reduction does not support zero-size arrays" + ) + else: + indices = [i for i in range(a.ndim) if i not in axis] + res_shape = tuple([a.shape[i] for i in indices]) + result = dpnp.empty(res_shape, dtype=a.dtype) else: - result_obj = dpnp_min(x1_desc, axis).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - + result = dpnp_array._create_from_usm_ndarray( + dpt.min(dpt_array, axis=axis, keepdims=keepdims) + ) + if out is None: return result + else: + if out.shape != result.shape: + raise ValueError( + f"Output array of shape {result.shape} is needed, got {out.shape}." + ) + elif not isinstance(out, dpnp_array): + if isinstance(out, dpt.usm_ndarray): + out = dpnp_array._create_from_usm_ndarray(out) + else: + raise TypeError( + "Output array must be any of supported type, but got {}".format( + type(out) + ) + ) - return call_origin(numpy.min, x1, axis, out, keepdims, initial, where) + dpnp.copyto(out, result, casting="safe") + + return out def nanvar(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): @@ -619,7 +704,7 @@ def nanvar(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): Parameter `axis` is supported only with default value ``None``. Parameter `dtype` is supported only with default value ``None``. Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``numpy._NoValue``. + Parameter `keepdims` is supported only with default value ``False``. Otherwise the function will be executed sequentially on CPU. 
""" @@ -665,7 +750,7 @@ def std(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): Parameter `axis` is supported only with default value ``None``. Parameter `dtype` is supported only with default value ``None``. Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``numpy._NoValue``. + Parameter `keepdims` is supported only with default value ``False``. Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -723,7 +808,7 @@ def var(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): Parameter `axis` is supported only with default value ``None``. Parameter `dtype` is supported only with default value ``None``. Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``numpy._NoValue``. + Parameter `keepdims` is supported only with default value ``False``. Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 801b29fdf3c5..48490d92f389 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -118,7 +118,6 @@ tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatte tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_copied tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_transposed -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_min_nan tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_all tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_all_keepdims tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis0 @@ -796,14 +795,10 @@ tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_4_{siz tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_bound_float1 tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_goodness_of_fit tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_goodness_of_fit_2 - tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_bound_1 tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_bound_2 tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_goodness_of_fit tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_goodness_of_fit_2 -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers::test_high_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers::test_normal -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers::test_size_is_not_none tests/third_party/cupy/sorting_tests/test_search.py::TestArgMinMaxDtype_param_0_{func='argmin', is_module=True, shape=(3, 4)}::test_argminmax_dtype tests/third_party/cupy/sorting_tests/test_search.py::TestArgMinMaxDtype_param_1_{func='argmin', is_module=True, shape=()}::test_argminmax_dtype diff --git a/tests/test_amin_amax.py b/tests/test_amin_amax.py index 7c5bb8b1b503..5e197f5bf132 100644 --- a/tests/test_amin_amax.py +++ b/tests/test_amin_amax.py @@ -7,7 +7,7 @@ from .helper import get_all_dtypes -@pytest.mark.parametrize("dtype", 
get_all_dtypes(no_bool=True, no_complex=True)) +@pytest.mark.parametrize("dtype", get_all_dtypes()) def test_amax(dtype): a = numpy.array( [ @@ -25,7 +25,7 @@ def test_amax(dtype): assert_allclose(expected, result) -@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) +@pytest.mark.parametrize("dtype", get_all_dtypes()) def test_amin(dtype): a = numpy.array( [ @@ -55,8 +55,7 @@ def _get_min_max_input(type, shape): return a.reshape(shape) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @pytest.mark.parametrize( "shape", [(4,), (2, 3), (4, 5, 6)], ids=["(4,)", "(2,3)", "(4,5,6)"] ) @@ -74,8 +73,7 @@ def test_amax_diff_shape(dtype, shape): numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @pytest.mark.parametrize( "shape", [(4,), (2, 3), (4, 5, 6)], ids=["(4,)", "(2,3)", "(4,5,6)"] ) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index 4020c6c21d72..2894f24a37bb 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -1,3 +1,4 @@ +import dpctl.tensor as dpt import numpy import pytest from numpy.testing import assert_allclose @@ -21,20 +22,68 @@ def test_median(dtype, size): assert_allclose(dpnp_res, np_res) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize("axis", [0, 1, -1, 2, -2, (1, 2), (0, -2)]) -@pytest.mark.parametrize( - "dtype", get_all_dtypes(no_none=True, no_bool=True, no_complex=True) -) -def test_max(axis, dtype): +@pytest.mark.parametrize("func", ["max", "min"]) +@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) +@pytest.mark.parametrize("keepdims", [False, True]) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) +def test_max_min(func, axis, keepdims, dtype): a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) ia = dpnp.array(a) - np_res = numpy.max(a, axis=axis) - dpnp_res = dpnp.max(ia, axis=axis) + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["max", "min"]) +@pytest.mark.parametrize("axis", [None, 0, 1, -1]) +@pytest.mark.parametrize("keepdims", [False, True]) +def test_max_min_bool(func, axis, keepdims): + a = numpy.arange(2, dtype=dpnp.bool) + a = numpy.tile(a, (2, 2)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["max", "min"]) +def test_max_min_out(func): + a = numpy.arange(6).reshape((2, 3)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = dpnp.array(numpy.empty_like(np_res)) + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + assert_allclose(dpnp_res, np_res) + dpnp_res = dpt.asarray(numpy.empty_like(np_res)) + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) assert_allclose(dpnp_res, np_res) + dpnp_res = numpy.empty_like(np_res) + with pytest.raises(TypeError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + + dpnp_res = 
dpnp.array(numpy.empty((2, 3))) + with pytest.raises(ValueError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + + +@pytest.mark.parametrize("func", ["max", "min"]) +def test_max_min_NotImplemented(func): + ia = dpnp.arange(5) + + with pytest.raises(NotImplementedError): + getattr(dpnp, func)(ia, where=False) + with pytest.raises(NotImplementedError): + getattr(dpnp, func)(ia, initial=6) + @pytest.mark.usefixtures("allow_fall_back_on_numpy") @pytest.mark.parametrize( diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 44852b5f5137..3c131a6462f5 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -260,6 +260,8 @@ def test_meshgrid(device_x, device_y): pytest.param("log10", [1.0, 2.0, 4.0, 7.0]), pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), + pytest.param("max", [1.0, 2.0, 4.0, 7.0]), + pytest.param("min", [1.0, 2.0, 4.0, 7.0]), pytest.param("nancumprod", [1.0, dpnp.nan]), pytest.param("nancumsum", [1.0, dpnp.nan]), pytest.param("nanprod", [1.0, dpnp.nan]), diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 206cae643269..3060edd4beae 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -350,6 +350,8 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), pytest.param("nanprod", [1.0, 2.0, dp.nan]), + pytest.param("max", [1.0, 2.0, 4.0, 7.0]), + pytest.param("min", [1.0, 2.0, 4.0, 7.0]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), diff --git a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py index ceea3c6259cb..952398575f1d 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py @@ -215,6 +215,65 @@ def test_ptp_nan_imag(self, xp, dtype): return a.ptp() +@testing.parameterize( + *testing.product( + { + # TODO(leofang): make a @testing.for_all_axes decorator + "shape_and_axis": [ + ((), None), + ((0,), (0,)), + ((0, 2), (0,)), + ((0, 2), (1,)), + ((0, 2), (0, 1)), + ((2, 0), (0,)), + ((2, 0), (1,)), + ((2, 0), (0, 1)), + ((0, 2, 3), (0,)), + ((0, 2, 3), (1,)), + ((0, 2, 3), (2,)), + ((0, 2, 3), (0, 1)), + ((0, 2, 3), (1, 2)), + ((0, 2, 3), (0, 2)), + ((0, 2, 3), (0, 1, 2)), + ((2, 0, 3), (0,)), + ((2, 0, 3), (1,)), + ((2, 0, 3), (2,)), + ((2, 0, 3), (0, 1)), + ((2, 0, 3), (1, 2)), + ((2, 0, 3), (0, 2)), + ((2, 0, 3), (0, 1, 2)), + ((2, 3, 0), (0,)), + ((2, 3, 0), (1,)), + ((2, 3, 0), (2,)), + ((2, 3, 0), (0, 1)), + ((2, 3, 0), (1, 2)), + ((2, 3, 0), (0, 2)), + ((2, 3, 0), (0, 1, 2)), + ], + "order": ("C", "F"), + "func": ("min", "max"), + } + ) +) +class TestArrayReductionZeroSize: + @testing.numpy_cupy_allclose( + contiguous_check=False, accept_error=ValueError + ) + def test_zero_size(self, xp): + shape, axis = self.shape_and_axis + # NumPy only supports axis being an int + if self.func in ("argmax", "argmin"): + if axis is not None and len(axis) == 1: + axis = axis[0] + else: + pytest.skip( + f"NumPy does not support axis={axis} for {self.func}" + ) + # dtype is irrelevant here, just pick one + a = testing.shaped_random(shape, xp, xp.float32, order=self.order) + return getattr(a, self.func)(axis=axis) + + # This class compares CUB results against NumPy's @testing.parameterize( *testing.product( From c31557cdc74e2d90b7fda410bb6e47ce05824b89 Mon Sep 17 
00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Mon, 13 Nov 2023 14:18:31 -0600 Subject: [PATCH 14/38] in place divide and floor_divide (#1587) * in place divide and floor_divide * address comments * add more tests for floor_divide * fix format --- tests/test_mathematical.py | 260 +++++++++++++++++++++++++++++++------ 1 file changed, 222 insertions(+), 38 deletions(-) diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 63d5172c08ed..89a09a7dc294 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1070,9 +1070,9 @@ def test_invalid_out(self, out): assert_raises(TypeError, numpy.add, a.asnumpy(), 2, out) -class TestHypot: - @pytest.mark.parametrize("dtype", get_float_dtypes()) - def test_hypot(self, dtype): +class TestDivide: + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_divide(self, dtype): array1_data = numpy.arange(10) array2_data = numpy.arange(5, 15) out = numpy.empty(10, dtype=dtype) @@ -1081,55 +1081,169 @@ def test_hypot(self, dtype): dp_array1 = dpnp.array(array1_data, dtype=dtype) dp_array2 = dpnp.array(array2_data, dtype=dtype) dp_out = dpnp.array(out, dtype=dtype) - result = dpnp.hypot(dp_array1, dp_array2, out=dp_out) + result = dpnp.divide(dp_array1, dp_array2, out=dp_out) # original np_array1 = numpy.array(array1_data, dtype=dtype) np_array2 = numpy.array(array2_data, dtype=dtype) - expected = numpy.hypot(np_array1, np_array2, out=out) + expected = numpy.divide(np_array1, np_array2, out=out) - assert_allclose(expected, result) - assert_allclose(out, dp_out) + assert_dtype_allclose(result, expected) + assert_dtype_allclose(dp_out, out) - @pytest.mark.parametrize("dtype", get_float_dtypes()) + @pytest.mark.usefixtures("suppress_divide_invalid_numpy_warnings") + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) def test_out_dtypes(self, dtype): size = 10 np_array1 = numpy.arange(size, 2 * size, dtype=dtype) np_array2 = numpy.arange(size, dtype=dtype) - np_out = numpy.empty(size, dtype=numpy.float32) - expected = numpy.hypot(np_array1, np_array2, out=np_out) + np_out = numpy.empty(size, dtype=numpy.complex64) + expected = numpy.divide(np_array1, np_array2, out=np_out) dp_array1 = dpnp.arange(size, 2 * size, dtype=dtype) dp_array2 = dpnp.arange(size, dtype=dtype) - dp_out = dpnp.empty(size, dtype=dpnp.float32) - if dtype != dpnp.float32: + dp_out = dpnp.empty(size, dtype=dpnp.complex64) + if dtype != dpnp.complex64: # dtype of out mismatches types of input arrays with pytest.raises(TypeError): - dpnp.hypot(dp_array1, dp_array2, out=dp_out) + dpnp.divide(dp_array1, dp_array2, out=dp_out) # allocate new out with expected type dp_out = dpnp.empty(size, dtype=dtype) - result = dpnp.hypot(dp_array1, dp_array2, out=dp_out) + result = dpnp.divide(dp_array1, dp_array2, out=dp_out) + assert_dtype_allclose(result, expected) - tol = numpy.finfo(numpy.float32).resolution - assert_allclose(expected, result, rtol=tol, atol=tol) + @pytest.mark.usefixtures("suppress_divide_invalid_numpy_warnings") + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_out_overlap(self, dtype): + size = 15 + # DPNP + dp_a = dpnp.arange(2 * size, dtype=dtype) + dpnp.divide(dp_a[size::], dp_a[::2], out=dp_a[:size:]) - @pytest.mark.parametrize("dtype", get_float_dtypes()) + # original + np_a = numpy.arange(2 * size, dtype=dtype) + numpy.divide(np_a[size::], np_a[::2], out=np_a[:size:]) + + assert_dtype_allclose(dp_a, np_a) + + @pytest.mark.parametrize("dtype", 
get_float_complex_dtypes()) + def test_inplace_strided_out(self, dtype): + size = 21 + + np_a = numpy.arange(size, dtype=dtype) + np_a[::3] /= 4 + + dp_a = dpnp.arange(size, dtype=dtype) + dp_a[::3] /= 4 + + assert_allclose(dp_a, np_a) + + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) + def test_invalid_shape(self, shape): + dp_array1 = dpnp.arange(10) + dp_array2 = dpnp.arange(5, 15) + dp_out = dpnp.empty(shape) + + with pytest.raises(ValueError): + dpnp.divide(dp_array1, dp_array2, out=dp_out) + + @pytest.mark.parametrize( + "out", + [4, (), [], (3, 7), [2, 4]], + ids=["4", "()", "[]", "(3, 7)", "[2, 4]"], + ) + def test_invalid_out(self, out): + a = dpnp.arange(10) + + assert_raises(TypeError, dpnp.divide, a, 2, out) + assert_raises(TypeError, numpy.divide, a.asnumpy(), 2, out) + + +class TestFloorDivide: + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) + def test_floor_divide(self, dtype): + array1_data = numpy.arange(10) + array2_data = numpy.arange(5, 15) + out = numpy.empty(10, dtype=dtype) + + # DPNP + dp_array1 = dpnp.array(array1_data, dtype=dtype) + dp_array2 = dpnp.array(array2_data, dtype=dtype) + dp_out = dpnp.array(out, dtype=dtype) + result = dpnp.floor_divide(dp_array1, dp_array2, out=dp_out) + + # original + np_array1 = numpy.array(array1_data, dtype=dtype) + np_array2 = numpy.array(array2_data, dtype=dtype) + expected = numpy.floor_divide(np_array1, np_array2, out=out) + + assert_allclose(result, expected) + assert_allclose(dp_out, out) + + @pytest.mark.usefixtures("suppress_divide_invalid_numpy_warnings") + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) + def test_out_dtypes(self, dtype): + size = 10 + + np_array1 = numpy.arange(size, 2 * size, dtype=dtype) + np_array2 = numpy.arange(size, dtype=dtype) + np_out = numpy.empty(size, dtype=numpy.complex64) + expected = numpy.floor_divide(np_array1, np_array2, out=np_out) + + dp_array1 = dpnp.arange(size, 2 * size, dtype=dtype) + dp_array2 = dpnp.arange(size, dtype=dtype) + + dp_out = dpnp.empty(size, dtype=dpnp.complex64) + if dtype != dpnp.complex64: + # dtype of out mismatches types of input arrays + with pytest.raises(TypeError): + dpnp.floor_divide(dp_array1, dp_array2, out=dp_out) + + # allocate new out with expected type + dp_out = dpnp.empty(size, dtype=dtype) + + result = dpnp.floor_divide(dp_array1, dp_array2, out=dp_out) + assert_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_divide_invalid_numpy_warnings") + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) def test_out_overlap(self, dtype): size = 15 # DPNP dp_a = dpnp.arange(2 * size, dtype=dtype) - dpnp.hypot(dp_a[size::], dp_a[::2], out=dp_a[:size:]) + dpnp.floor_divide(dp_a[size::], dp_a[::2], out=dp_a[:size:]) # original np_a = numpy.arange(2 * size, dtype=dtype) - numpy.hypot(np_a[size::], np_a[::2], out=np_a[:size:]) + numpy.floor_divide(np_a[size::], np_a[::2], out=np_a[:size:]) - tol = numpy.finfo(numpy.float32).resolution - assert_allclose(np_a, dp_a, rtol=tol, atol=tol) + assert_allclose(dp_a, np_a) + + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) + def test_inplace_strided_out(self, dtype): + size = 21 + + np_a = numpy.arange(size, dtype=dtype) + np_a[::3] //= 4 + + dp_a = dpnp.arange(size, dtype=dtype) + dp_a[::3] //= 4 + + assert_allclose(dp_a, np_a) 
@pytest.mark.parametrize( "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] @@ -1140,7 +1254,7 @@ def test_invalid_shape(self, shape): dp_out = dpnp.empty(shape) with pytest.raises(ValueError): - dpnp.hypot(dp_array1, dp_array2, out=dp_out) + dpnp.floor_divide(dp_array1, dp_array2, out=dp_out) @pytest.mark.parametrize( "out", @@ -1150,8 +1264,8 @@ def test_invalid_shape(self, shape): def test_invalid_out(self, out): a = dpnp.arange(10) - assert_raises(TypeError, dpnp.hypot, a, 2, out) - assert_raises(TypeError, numpy.hypot, a.asnumpy(), 2, out) + assert_raises(TypeError, dpnp.floor_divide, a, 2, out) + assert_raises(TypeError, numpy.floor_divide, a.asnumpy(), 2, out) class TestFmax: @@ -1316,6 +1430,90 @@ def test_invalid_out(self, out): assert_raises(TypeError, numpy.fmin, a.asnumpy(), 2, out) +class TestHypot: + @pytest.mark.parametrize("dtype", get_float_dtypes()) + def test_hypot(self, dtype): + array1_data = numpy.arange(10) + array2_data = numpy.arange(5, 15) + out = numpy.empty(10, dtype=dtype) + + # DPNP + dp_array1 = dpnp.array(array1_data, dtype=dtype) + dp_array2 = dpnp.array(array2_data, dtype=dtype) + dp_out = dpnp.array(out, dtype=dtype) + result = dpnp.hypot(dp_array1, dp_array2, out=dp_out) + + # original + np_array1 = numpy.array(array1_data, dtype=dtype) + np_array2 = numpy.array(array2_data, dtype=dtype) + expected = numpy.hypot(np_array1, np_array2, out=out) + + assert_allclose(expected, result) + assert_allclose(out, dp_out) + + @pytest.mark.parametrize("dtype", get_float_dtypes()) + def test_out_dtypes(self, dtype): + size = 10 + + np_array1 = numpy.arange(size, 2 * size, dtype=dtype) + np_array2 = numpy.arange(size, dtype=dtype) + np_out = numpy.empty(size, dtype=numpy.float32) + expected = numpy.hypot(np_array1, np_array2, out=np_out) + + dp_array1 = dpnp.arange(size, 2 * size, dtype=dtype) + dp_array2 = dpnp.arange(size, dtype=dtype) + + dp_out = dpnp.empty(size, dtype=dpnp.float32) + if dtype != dpnp.float32: + # dtype of out mismatches types of input arrays + with pytest.raises(TypeError): + dpnp.hypot(dp_array1, dp_array2, out=dp_out) + + # allocate new out with expected type + dp_out = dpnp.empty(size, dtype=dtype) + + result = dpnp.hypot(dp_array1, dp_array2, out=dp_out) + + tol = numpy.finfo(numpy.float32).resolution + assert_allclose(expected, result, rtol=tol, atol=tol) + + @pytest.mark.parametrize("dtype", get_float_dtypes()) + def test_out_overlap(self, dtype): + size = 15 + # DPNP + dp_a = dpnp.arange(2 * size, dtype=dtype) + dpnp.hypot(dp_a[size::], dp_a[::2], out=dp_a[:size:]) + + # original + np_a = numpy.arange(2 * size, dtype=dtype) + numpy.hypot(np_a[size::], np_a[::2], out=np_a[:size:]) + + tol = numpy.finfo(numpy.float32).resolution + assert_allclose(np_a, dp_a, rtol=tol, atol=tol) + + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) + def test_invalid_shape(self, shape): + dp_array1 = dpnp.arange(10) + dp_array2 = dpnp.arange(5, 15) + dp_out = dpnp.empty(shape) + + with pytest.raises(ValueError): + dpnp.hypot(dp_array1, dp_array2, out=dp_out) + + @pytest.mark.parametrize( + "out", + [4, (), [], (3, 7), [2, 4]], + ids=["4", "()", "[]", "(3, 7)", "[2, 4]"], + ) + def test_invalid_out(self, out): + a = dpnp.arange(10) + + assert_raises(TypeError, dpnp.hypot, a, 2, out) + assert_raises(TypeError, numpy.hypot, a.asnumpy(), 2, out) + + class TestMaximum: @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True)) def test_maximum(self, dtype): @@ -1846,17 +2044,3 @@ def 
test_inplace_remainder(dtype): dp_a %= 4 assert_allclose(dp_a, np_a) - - -@pytest.mark.parametrize( - "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) -) -def test_inplace_floor_divide(dtype): - size = 21 - np_a = numpy.arange(size, dtype=dtype) - dp_a = dpnp.arange(size, dtype=dtype) - - np_a //= 4 - dp_a //= 4 - - assert_allclose(dp_a, np_a) From 237a5f15388502050e070cbde2acff8da94c1d19 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Wed, 15 Nov 2023 17:17:25 -0600 Subject: [PATCH 15/38] address comments --- dpnp/dpnp_array.py | 57 ++------------------------------- dpnp/dpnp_iface_manipulation.py | 33 ++++++++++++------- 2 files changed, 24 insertions(+), 66 deletions(-) diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index e9f195933016..f6bd27a99a2a 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1054,39 +1054,7 @@ def ravel(self, order="C"): """ Return a contiguous flattened array. - For full documentation refer to :obj:`numpy.ndarray.ravel`. - - Parameters - ---------- - order : {'C', 'F'}, optional - The elements of a are read using this index order. ``C`` means to index - the elements in row-major, C-style order, with the last axis index - changing fastest, back to the first axis index changing slowest. ``F`` - means to index the elements in column-major, Fortran-style order, with - the first index changing fastest, and the last index changing slowest. - By default, ``C`` index order is used. - - Returns - ------- - y : dpnp_array - `y` is a contiguous 1-D array of the same subtype as a, with shape (a.size,) - - See Also - -------- - :obj:`dpnp.reshape` : Change the shape of an array without changing its data. - - Examples - -------- - >>> import dpnp as np - >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> x.ravel() - array([1, 2, 3, 4, 5, 6]) - - >>> x.reshape(-1) - array([1, 2, 3, 4, 5, 6]) - - >>> x.ravel(order='F') - array([1, 4, 2, 5, 3, 6]) + For full documentation refer to :obj:`dpnp.ravel`. """ @@ -1131,28 +1099,7 @@ def repeat(self, repeats, axis=None): """ Repeat elements of an array. - For full documentation refer to :obj:`numpy.ndarray.repeat`. - - Parameters - ---------- - repeat : Union[int, Tuple[int, ...]] - The number of repetitions for each element. - `repeats` is broadcasted to fit the shape of the given axis. - axis : Optional[int] - The axis along which to repeat values. The `axis` is required - if input array has more than one dimension. - - Returns - ------- - out : dpnp_array - Array with repeated elements. - - Examples - -------- - >>> import dpnp as np - >>> x = np.array([3]) - >>> x.repeat(4) - array([3, 3, 3, 3]) + For full documentation refer to :obj:`dpnp.repeat`. """ diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 49e9e8731a14..6941af2829e1 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -979,10 +979,10 @@ def ravel(a, order="C"): Parameters ---------- x : {dpnp_array, usm_ndarray} - Input array. The elements in a are read in the order specified by order, + Input array. The elements in `a` are read in the order specified by order, and packed as a 1-D array. order : {'C', 'F'}, optional - The elements of a are read using this index order. ``C`` means to index + The elements of `a` are read using this index order. ``C`` means to index the elements in row-major, C-style order, with the last axis index changing fastest, back to the first axis index changing slowest. 
``F`` means to index the elements in column-major, Fortran-style order, with @@ -991,8 +991,8 @@ def ravel(a, order="C"): Returns ------- - y : dpnp_array - `y` is a contiguous 1-D array of the same subtype as a, with shape (a.size,) + out : dpnp_array + `out` is a contiguous 1-D array of the same subtype as `a`, with shape (a.size,) See Also -------- @@ -1026,17 +1026,17 @@ def repeat(a, repeats, axis=None): ---------- x : {dpnp_array, usm_ndarray} Input array. - repeat : Union[int, Tuple[int, ...]] - The number of repetitions for each element. - `repeats` is broadcasted to fit the shape of the given axis. - axis : Optional[int] - The axis along which to repeat values. The `axis` is required - if input array has more than one dimension. + repeat : int or array of int + The number of repetitions for each element. `repeats` is broadcasted to fit + the shape of the given axis. + axis : int, optional + The axis along which to repeat values. By default, use the flattened input array, + and return a flat output array. Returns ------- out : dpnp_array - Array with repeated elements. + Output array which has the same shape as `a`, except along the given axis. See Also -------- @@ -1049,6 +1049,17 @@ def repeat(a, repeats, axis=None): >>> np.repeat(x, 4) array([3, 3, 3, 3]) + >>> x = np.array([[1,2], [3,4]]) + >>> np.repeat(x, 2) + array([1, 1, 2, 2, 3, 3, 4, 4]) + >>> np.repeat(x, 3, axis=1) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> np.repeat(x, [1, 2], axis=0) + array([[1, 2], + [3, 4], + [3, 4]]) + """ rep = repeats From b81bfb98bdfab2a0d57011358512a9d0e8da5f7a Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Wed, 15 Nov 2023 20:56:14 -0800 Subject: [PATCH 16/38] muted tests for diagflat for scalars --- tests/third_party/cupy/creation_tests/test_matrix.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/third_party/cupy/creation_tests/test_matrix.py b/tests/third_party/cupy/creation_tests/test_matrix.py index e51a33c8c039..7123989d7c57 100644 --- a/tests/third_party/cupy/creation_tests/test_matrix.py +++ b/tests/third_party/cupy/creation_tests/test_matrix.py @@ -92,17 +92,17 @@ def test_diagflat3(self, xp): a = testing.shaped_arange((3, 3), xp) return xp.diagflat(a, -2) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @pytest.mark.skip("Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_diagflat_from_scalar(self, xp): return xp.diagflat(3) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @pytest.mark.skip("Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_diagflat_from_scalar_with_k0(self, xp): return xp.diagflat(3, 0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @pytest.mark.skip("Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_diagflat_from_scalar_with_k1(self, xp): return xp.diagflat(3, 1) From 630dae25da1e17b98aa0adfe24d1cf351ed03a91 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Mon, 20 Nov 2023 18:38:44 +0100 Subject: [PATCH 17/38] Pin DPC++ compiler to 2023.2.1 for coverage to work around a crash in 2024.0 (#1628) --- .github/workflows/conda-package.yml | 2 +- .github/workflows/generate_coverage.yaml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/conda-package.yml b/.github/workflows/conda-package.yml index 472c0d6be9a3..853776867d8d 100644 --- a/.github/workflows/conda-package.yml +++ b/.github/workflows/conda-package.yml @@ -423,7 +423,7 @@ jobs: run: 
conda install anaconda-client - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3.5.2 with: repository: IntelPython/devops-tools fetch-depth: 0 diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage.yaml index 668b69e1fb53..f52d2dcb6e10 100644 --- a/.github/workflows/generate_coverage.yaml +++ b/.github/workflows/generate_coverage.yaml @@ -42,8 +42,10 @@ jobs: - name: Install dpnp dependencies run: | + # use DPC++ compiler 2023.2 to work around an issue with crash conda install cython llvm cmake">=3.21" scikit-build ninja pytest pytest-cov coverage[toml] \ - dpctl dpcpp_linux-64 sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel onedpl-devel ${{ env.CHANNELS }} + dpctl dpcpp_linux-64"=2023.2" sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel"=2021.10" \ + onedpl-devel ${{ env.CHANNELS }} - name: Conda info run: | From 5bcf910be7a80a14024fbd545aa7d944336ad460 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Mon, 20 Nov 2023 14:51:02 -0600 Subject: [PATCH 18/38] rework implementation of diag, diagflat, vander, and ptp (#1579) * rework implementation of diag, diagflat, vander, and ptp * address comments - first round cherry-pick * address comments - second round * add tests for negative use cases to improve covergae * fixed missing merge conflicts * fix pre-commit --- dpnp/backend/include/dpnp_iface_fptr.hpp | 69 +++--- .../kernels/dpnp_krnl_arraycreation.cpp | 74 ------ dpnp/dpnp_algo/dpnp_algo.pxd | 6 - dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi | 168 ------------- dpnp/dpnp_array.py | 2 - dpnp/dpnp_container.py | 1 + dpnp/dpnp_iface_arraycreation.py | 230 ++++++++++++------ dpnp/dpnp_iface_statistics.py | 44 ++++ tests/skipped_tests.tbl | 17 -- tests/skipped_tests_gpu.tbl | 18 +- tests/test_arraycreation.py | 64 ++++- tests/test_statistics.py | 66 ++++- tests/test_sycl_queue.py | 79 +++++- tests/test_usm_type.py | 25 +- .../cupy/core_tests/test_ndarray_reduction.py | 168 +++++++------ .../cupy/creation_tests/test_matrix.py | 44 +++- 16 files changed, 590 insertions(+), 485 deletions(-) diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index fbb18be244b2..e7cdc0d6cea0 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -132,9 +132,7 @@ enum class DPNPFuncName : size_t DPNP_FN_DET_EXT, /**< Used in numpy.linalg.det() impl, requires extra parameters */ DPNP_FN_DIAG, /**< Used in numpy.diag() impl */ - DPNP_FN_DIAG_EXT, /**< Used in numpy.diag() impl, requires extra parameters - */ - DPNP_FN_DIAG_INDICES, /**< Used in numpy.diag_indices() impl */ + DPNP_FN_DIAG_INDICES, /**< Used in numpy.diag_indices() impl */ DPNP_FN_DIAG_INDICES_EXT, /**< Used in numpy.diag_indices() impl, requires extra parameters */ DPNP_FN_DIAGONAL, /**< Used in numpy.diagonal() impl */ @@ -225,25 +223,24 @@ enum class DPNPFuncName : size_t DPNP_FN_MODF_EXT, /**< Used in numpy.modf() impl, requires extra parameters */ DPNP_FN_MULTIPLY, /**< Used in numpy.multiply() impl */ - DPNP_FN_MULTIPLY_EXT, /**< Used in numpy.multiply() impl, requires extra - parameters */ - DPNP_FN_NANVAR, /**< Used in numpy.nanvar() impl */ - DPNP_FN_NANVAR_EXT, /**< Used in numpy.nanvar() impl, requires extra - parameters */ - DPNP_FN_NEGATIVE, /**< Used in numpy.negative() impl */ - DPNP_FN_NONZERO, /**< Used in numpy.nonzero() impl */ - DPNP_FN_ONES, /**< Used in numpy.ones() impl */ - DPNP_FN_ONES_LIKE, /**< Used in numpy.ones_like() impl */ 
- DPNP_FN_PARTITION, /**< Used in numpy.partition() impl */ - DPNP_FN_PARTITION_EXT, /**< Used in numpy.partition() impl, requires extra - parameters */ - DPNP_FN_PLACE, /**< Used in numpy.place() impl */ - DPNP_FN_POWER, /**< Used in numpy.power() impl */ - DPNP_FN_PROD, /**< Used in numpy.prod() impl */ - DPNP_FN_PTP, /**< Used in numpy.ptp() impl */ - DPNP_FN_PTP_EXT, /**< Used in numpy.ptp() impl, requires extra parameters */ - DPNP_FN_PUT, /**< Used in numpy.put() impl */ - DPNP_FN_PUT_ALONG_AXIS, /**< Used in numpy.put_along_axis() impl */ + DPNP_FN_MULTIPLY_EXT, /**< Used in numpy.multiply() impl, requires extra + parameters */ + DPNP_FN_NANVAR, /**< Used in numpy.nanvar() impl */ + DPNP_FN_NANVAR_EXT, /**< Used in numpy.nanvar() impl, requires extra + parameters */ + DPNP_FN_NEGATIVE, /**< Used in numpy.negative() impl */ + DPNP_FN_NONZERO, /**< Used in numpy.nonzero() impl */ + DPNP_FN_ONES, /**< Used in numpy.ones() impl */ + DPNP_FN_ONES_LIKE, /**< Used in numpy.ones_like() impl */ + DPNP_FN_PARTITION, /**< Used in numpy.partition() impl */ + DPNP_FN_PARTITION_EXT, /**< Used in numpy.partition() impl, requires extra + parameters */ + DPNP_FN_PLACE, /**< Used in numpy.place() impl */ + DPNP_FN_POWER, /**< Used in numpy.power() impl */ + DPNP_FN_PROD, /**< Used in numpy.prod() impl */ + DPNP_FN_PTP, /**< Used in numpy.ptp() impl */ + DPNP_FN_PUT, /**< Used in numpy.put() impl */ + DPNP_FN_PUT_ALONG_AXIS, /**< Used in numpy.put_along_axis() impl */ DPNP_FN_PUT_ALONG_AXIS_EXT, /**< Used in numpy.put_along_axis() impl, requires extra parameters */ DPNP_FN_QR, /**< Used in numpy.linalg.qr() impl */ @@ -401,21 +398,19 @@ enum class DPNPFuncName : size_t DPNP_FN_TAKE, /**< Used in numpy.take() impl */ DPNP_FN_TAN, /**< Used in numpy.tan() impl */ DPNP_FN_TANH, /**< Used in numpy.tanh() impl */ - DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ - DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ - DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra - parameters */ - DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ - DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra - parameters */ - DPNP_FN_TRI, /**< Used in numpy.tri() impl */ - DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ - DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ - DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ - DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ - DPNP_FN_VANDER_EXT, /**< Used in numpy.vander() impl, requires extra - parameters */ - DPNP_FN_VAR, /**< Used in numpy.var() impl */ + DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ + DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ + DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra + parameters */ + DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ + DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra + parameters */ + DPNP_FN_TRI, /**< Used in numpy.tri() impl */ + DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ + DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ + DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ + DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ + DPNP_FN_VAR, /**< Used in numpy.var() impl */ DPNP_FN_VAR_EXT, /**< Used in numpy.var() impl, requires extra parameters */ DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ DPNP_FN_ZEROS_LIKE, /**< Used in numpy.zeros_like() impl */ diff --git a/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp b/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp index a655c03100fb..b1af79e019d0 100644 --- 
a/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp @@ -200,18 +200,6 @@ void (*dpnp_diag_default_c)(void *, const size_t, const size_t) = dpnp_diag_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_diag_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const int, - shape_elem_type *, - shape_elem_type *, - const size_t, - const size_t, - const DPCTLEventVectorRef) = - dpnp_diag_c<_DataType>; - template DPCTLSyclEventRef dpnp_eye_c(DPCTLSyclQueueRef q_ref, void *result1, @@ -569,23 +557,6 @@ void (*dpnp_ptp_default_c)(void *, const shape_elem_type *, const size_t) = dpnp_ptp_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_ptp_ext_c)(DPCTLSyclQueueRef, - void *, - const size_t, - const size_t, - const shape_elem_type *, - const shape_elem_type *, - const void *, - const size_t, - const size_t, - const shape_elem_type *, - const shape_elem_type *, - const shape_elem_type *, - const size_t, - const DPCTLEventVectorRef) = - dpnp_ptp_c<_DataType>; - template DPCTLSyclEventRef dpnp_vander_c(DPCTLSyclQueueRef q_ref, const void *array1_in, @@ -673,16 +644,6 @@ void (*dpnp_vander_default_c)(const void *, const int) = dpnp_vander_c<_DataType_input, _DataType_output>; -template -DPCTLSyclEventRef (*dpnp_vander_ext_c)(DPCTLSyclQueueRef, - const void *, - void *, - const size_t, - const size_t, - const int, - const DPCTLEventVectorRef) = - dpnp_vander_c<_DataType_input, _DataType_output>; - template class dpnp_trace_c_kernel; @@ -1192,15 +1153,6 @@ void func_map_init_arraycreation(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_DIAG][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_diag_default_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_diag_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_diag_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_diag_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_diag_ext_c}; - fmap[DPNPFuncName::DPNP_FN_EYE][eft_INT][eft_INT] = { eft_INT, (void *)dpnp_eye_default_c}; fmap[DPNPFuncName::DPNP_FN_EYE][eft_LNG][eft_LNG] = { @@ -1284,15 +1236,6 @@ void func_map_init_arraycreation(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_PTP][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_ptp_default_c}; - fmap[DPNPFuncName::DPNP_FN_PTP_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_ptp_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PTP_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_ptp_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PTP_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_ptp_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PTP_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_ptp_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VANDER][eft_INT][eft_INT] = { eft_LNG, (void *)dpnp_vander_default_c}; fmap[DPNPFuncName::DPNP_FN_VANDER][eft_LNG][eft_LNG] = { @@ -1308,23 +1251,6 @@ void func_map_init_arraycreation(func_map_t &fmap) (void *) dpnp_vander_default_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_INT][eft_INT] = { - eft_LNG, (void *)dpnp_vander_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_vander_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_vander_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_vander_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_BLN][eft_BLN] = { - eft_LNG, (void *)dpnp_vander_ext_c}; - 
fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_C64][eft_C64] = { - eft_C64, - (void *)dpnp_vander_ext_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_VANDER_EXT][eft_C128][eft_C128] = { - eft_C128, - (void *)dpnp_vander_ext_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_TRACE][eft_INT][eft_INT] = { eft_INT, (void *)dpnp_trace_default_c}; fmap[DPNPFuncName::DPNP_FN_TRACE][eft_LNG][eft_INT] = { diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 7ff37794e93e..d6d25fef1b95 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -64,8 +64,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_DEGREES_EXT DPNP_FN_DET DPNP_FN_DET_EXT - DPNP_FN_DIAG - DPNP_FN_DIAG_EXT DPNP_FN_DIAG_INDICES DPNP_FN_DIAG_INDICES_EXT DPNP_FN_DIAGONAL @@ -120,8 +118,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_PARTITION DPNP_FN_PARTITION_EXT DPNP_FN_PLACE - DPNP_FN_PTP - DPNP_FN_PTP_EXT DPNP_FN_QR DPNP_FN_QR_EXT DPNP_FN_RADIANS @@ -218,8 +214,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_TRIL_EXT DPNP_FN_TRIU DPNP_FN_TRIU_EXT - DPNP_FN_VANDER - DPNP_FN_VANDER_EXT DPNP_FN_VAR DPNP_FN_VAR_EXT DPNP_FN_ZEROS diff --git a/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi b/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi index 7b90ff1285fa..1322b1ccea5b 100644 --- a/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_arraycreation.pxi @@ -37,10 +37,7 @@ and the rest of the library __all__ += [ "dpnp_copy", - "dpnp_diag", - "dpnp_ptp", "dpnp_trace", - "dpnp_vander", ] @@ -88,136 +85,6 @@ cpdef utils.dpnp_descriptor dpnp_copy(utils.dpnp_descriptor x1): return call_fptr_1in_1out_strides(DPNP_FN_COPY_EXT, x1) -cpdef utils.dpnp_descriptor dpnp_diag(utils.dpnp_descriptor v, int k): - cdef shape_type_c input_shape = v.shape - cdef shape_type_c result_shape - - if v.ndim == 1: - n = v.shape[0] + abs(k) - - result_shape = (n, n) - else: - n = min(v.shape[0], v.shape[0] + k, v.shape[1], v.shape[1] - k) - if n < 0: - n = 0 - - result_shape = (n, ) - - v_obj = v.get_array() - - result_obj = dpnp_container.zeros(result_shape, dtype=v.dtype, device=v_obj.sycl_device) - cdef utils.dpnp_descriptor result = dpnp_descriptor(result_obj) - - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(v.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_DIAG_EXT, param1_type, param1_type) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_1in_1out_func_ptr_t func = kernel_data.ptr - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - v.get_data(), - result.get_data(), - k, - input_shape.data(), - result_shape.data(), - v.ndim, - result.ndim, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - -cpdef dpnp_ptp(utils.dpnp_descriptor arr, axis=None): - cdef shape_type_c shape_arr = arr.shape - cdef shape_type_c output_shape - if axis is None: - axis_ = axis - output_shape = (1,) - else: - if isinstance(axis, int): - if axis < 0: - axis_ = tuple([arr.ndim - axis]) - else: - axis_ = tuple([axis]) - else: - _axis_ = [] - for i in range(len(axis)): - if axis[i] < 0: - _axis_.append(arr.ndim - axis[i]) - else: - _axis_.append(axis[i]) - axis_ = tuple(_axis_) - - out_shape = [] - ind = 0 - for id, shape_axis in 
enumerate(shape_arr): - if id not in axis_: - out_shape.append(shape_axis) - output_shape = tuple(out_shape) - - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(arr.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_PTP_EXT, param1_type, param1_type) - - arr_obj = arr.get_array() - - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(output_shape, - kernel_data.return_type, - None, - device=arr_obj.sycl_device, - usm_type=arr_obj.usm_type, - sycl_queue=arr_obj.sycl_queue) - - cdef shape_type_c axis1 - cdef Py_ssize_t axis_size = 0 - cdef shape_type_c axis2 = axis1 - if axis_ is not None: - axis1 = axis_ - axis2.reserve(len(axis1)) - for shape_it in axis1: - if shape_it < 0: - raise ValueError("DPNP dparray::__init__(): Negative values in 'shape' are not allowed") - axis2.push_back(shape_it) - axis_size = len(axis1) - - cdef shape_type_c result_strides = utils.strides_to_vector(result.strides, result.shape) - cdef shape_type_c arr_strides = utils.strides_to_vector(arr.strides, arr.shape) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_arraycreation_1in_1out_func_ptr_t func = kernel_data.ptr - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - result.get_data(), - result.size, - result.ndim, - output_shape.data(), - result_strides.data(), - arr.get_data(), - arr.size, - arr.ndim, - shape_arr.data(), - arr_strides.data(), - axis2.data(), - axis_size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - cpdef utils.dpnp_descriptor dpnp_trace(utils.dpnp_descriptor arr, offset=0, axis1=0, axis2=1, dtype=None, out=None): if dtype is None: dtype_ = arr.dtype @@ -262,38 +129,3 @@ cpdef utils.dpnp_descriptor dpnp_trace(utils.dpnp_descriptor arr, offset=0, axis c_dpctl.DPCTLEvent_Delete(event_ref) return result - - -cpdef utils.dpnp_descriptor dpnp_vander(utils.dpnp_descriptor x1, int N, int increasing): - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_VANDER_EXT, param1_type, DPNP_FT_NONE) - - x1_obj = x1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (x1.size, N) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef ftpr_custom_vander_1in_1out_t func = kernel_data.ptr - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - x1.get_data(), - result.get_data(), - x1.size, - N, - increasing, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index f6bd27a99a2a..2d359b4a6253 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1039,8 +1039,6 @@ def prod( return dpnp.prod(self, axis, dtype, out, keepdims, initial, where) - # 'ptp' - def put(self, indices, vals, /, *, axis=None, mode="wrap"): """ Puts values of an array into another array along a given axis. 
diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py index b36b64a5113f..fac883a775b5 100644 --- a/dpnp/dpnp_container.py +++ b/dpnp/dpnp_container.py @@ -218,6 +218,7 @@ def full( ): """Validate input parameters before passing them into `dpctl.tensor` module""" dpu.validate_usm_type(usm_type, allow_none=True) + sycl_queue_normalized = dpnp.get_normalized_queue_device( fill_value, sycl_queue=sycl_queue, device=device ) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 0ed1187cb1d8..edcfe7ab3fc7 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -86,7 +84,6 @@ "ogrid", "ones", "ones_like", - "ptp", "trace", "tri", "tril", @@ -582,12 +579,30 @@ def copy(a, order="K", subok=False): return array(a, order=order, subok=subok, copy=True) -def diag(x1, k=0): +def diag(v, /, k=0, *, device=None, usm_type=None, sycl_queue=None): """ Extract a diagonal or construct a diagonal array. For full documentation refer to :obj:`numpy.diag`. + Returns + ------- + out : dpnp.ndarray + The extracted diagonal or constructed diagonal array. + + Limitations + ----------- + Parameter `k` is only supported as integer data type. + Otherwise ``TypeError`` exception will be raised. + + See Also + -------- + :obj:`diagonal` : Return specified diagonals. + :obj:`diagflat` : Create a 2-D array with the flattened input as a diagonal. + :obj:`trace` : Return sum along diagonals. + :obj:`triu` : Return upper triangle of an array. + :obj:`tril` : Return lower triangle of an array. + Examples -------- >>> import dpnp as np @@ -611,50 +626,92 @@ def diag(x1, k=0): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if not isinstance(k, int): - pass - elif x1_desc.ndim != 1 and x1_desc.ndim != 2: - pass - else: - return dpnp_diag(x1_desc, k).get_pyobj() + if not isinstance(k, int): + raise TypeError("An integer is required, but got {}".format(type(k))) + else: + v = dpnp.asarray( + v, device=device, usm_type=usm_type, sycl_queue=sycl_queue + ) - return call_origin(numpy.diag, x1, k) + init0 = max(0, -k) + init1 = max(0, k) + if v.ndim == 1: + size = v.shape[0] + abs(k) + m = dpnp.zeros( + (size, size), + dtype=v.dtype, + usm_type=v.usm_type, + sycl_queue=v.sycl_queue, + ) + for i in range(v.shape[0]): + m[(init0 + i), init1 + i] = v[i] + return m + elif v.ndim == 2: + size = min(v.shape[0], v.shape[0] + k, v.shape[1], v.shape[1] - k) + if size < 0: + size = 0 + m = dpnp.zeros( + (size,), + dtype=v.dtype, + usm_type=v.usm_type, + sycl_queue=v.sycl_queue, + ) + for i in range(size): + m[i] = v[(init0 + i), init1 + i] + return m + else: + raise ValueError("Input must be a 1-D or 2-D array.") -def diagflat(x1, k=0): +def diagflat(v, /, k=0, *, device=None, usm_type=None, sycl_queue=None): """ Create a two-dimensional array with the flattened input as a diagonal. For full documentation refer to :obj:`numpy.diagflat`. + Returns + ------- + out : dpnp.ndarray + The 2-D output array. + + See Also + -------- + :obj:`diag` : Return the extracted diagonal or constructed diagonal array. + :obj:`diagonal` : Return specified diagonals. + :obj:`trace` : Return sum along diagonals. + + Limitations + ----------- + Parameter `k` is only supported as integer data type. 
+ Otherwise ``TypeError`` exception will be raised. + Examples -------- >>> import dpnp as np - >>> np.diagflat([[1,2], [3,4]]) + >>> x = np.array([[1,2], [3,4]]) + >>> np.diagflat(x) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]) - >>> np.diagflat([1,2], 1) - array([[0, 1, 0], - [0, 0, 2], - [0, 0, 0]]) + >>> np.diagflat(x, 1) + array([[0, 1, 0, 0, 0], + [0, 0, 2, 0, 0], + [0, 0, 0, 3, 0], + [0, 0, 0, 0, 4], + [0, 0, 0, 0, 0]]) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - input_ravel = dpnp.ravel(x1) - input_ravel_desc = dpnp.get_dpnp_descriptor( - input_ravel, copy_when_nondefault_queue=False + if not isinstance(k, int): + raise TypeError("An integer is required, but got {}".format(type(k))) + else: + v = dpnp.asarray( + v, device=device, usm_type=usm_type, sycl_queue=sycl_queue ) - - return dpnp_diag(input_ravel_desc, k).get_pyobj() - - return call_origin(numpy.diagflat, x1, k) + v = dpnp.ravel(v) + return dpnp.diag(v, k, usm_type=v.usm_type, sycl_queue=v.sycl_queue) def empty( @@ -778,12 +835,12 @@ def empty_like( def eye( N, - M=None, /, - *, + M=None, k=0, dtype=None, order="C", + *, like=None, device=None, usm_type="device", @@ -800,6 +857,18 @@ def eye( Parameter `like` is supported only with default value ``None``. Otherwise the function will be executed sequentially on CPU. + Examples + -------- + >>> import dpnp as np + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + """ if order not in ("C", "c", "F", "f", None): pass @@ -941,6 +1010,7 @@ def full( [10, 10, 10, 10] """ + if like is not None: pass elif order not in ("C", "c", "F", "f", None): @@ -1095,7 +1165,14 @@ def geomspace( def identity( - n, dtype=None, *, device=None, usm_type="device", sycl_queue=None, like=None + n, + /, + dtype=None, + *, + like=None, + device=None, + usm_type="device", + sycl_queue=None, ): """ Return the identity array. @@ -1552,36 +1629,6 @@ def ones_like( return call_origin(numpy.ones_like, x1, dtype, order, subok, shape) -def ptp(arr, axis=None, out=None, keepdims=numpy._NoValue): - """ - Range of values (maximum - minimum) along an axis. - - For full documentation refer to :obj:`numpy.ptp`. - - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameters `out` and `keepdims` are supported only with default values. - - """ - arr_desc = dpnp.get_dpnp_descriptor(arr, copy_when_nondefault_queue=False) - if not arr_desc: - pass - elif axis is not None and not isinstance(axis, int): - pass - elif out is not None: - pass - elif keepdims is not numpy._NoValue: - pass - else: - result_obj = dpnp_ptp(arr_desc, axis=axis).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - - return result - - return call_origin(numpy.ptp, arr, axis, out, keepdims) - - def trace(x1, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ Return the sum along diagonals of the array. @@ -1616,9 +1663,11 @@ def trace(x1, offset=0, axis1=0, axis2=1, dtype=None, out=None): def tri( N, + /, M=None, k=0, dtype=dpnp.float, + *, device=None, usm_type="device", sycl_queue=None, @@ -1636,7 +1685,7 @@ def tri( Limitations ----------- - Parameter `M`, `N`, and `k` are only supported as integer data type. + Parameter `M`, `N`, and `k` are only supported as integer data type and when they are positive. Keyword argument `kwargs` is currently unsupported. Otherwise the function will be executed sequentially on CPU. 
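A minimal usage sketch of the reworked dpnp.diag from the hunks above, assuming the keyword-only device/usm_type/sycl_queue parameters this patch introduces; the concrete values are illustrative only:

    import dpnp

    v = dpnp.arange(4, usm_type="device")
    m = dpnp.diag(v, k=1)              # 1-D input: builds a (5, 5) matrix with v on the first super-diagonal
    assert m.usm_type == v.usm_type    # the result follows the input's allocation attributes
    d = dpnp.diag(m, k=1)              # 2-D input: extracts that diagonal back, d.shape == (4,)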
@@ -1781,12 +1830,31 @@ def triu(x1, /, *, k=0): return call_origin(numpy.triu, x1, k) -def vander(x1, N=None, increasing=False): +def vander( + x1, + /, + N=None, + increasing=False, + *, + device=None, + usm_type=None, + sycl_queue=None, +): """ Generate a Vandermonde matrix. For full documentation refer to :obj:`numpy.vander`. + Returns + ------- + out : dpnp.ndarray + Vandermonde matrix. + + Limitations + ----------- + Parameter `N`, if it is not ``None``, is only supported as integer data type. + Otherwise ``TypeError`` exception will be raised. + Examples -------- >>> import dpnp as np @@ -1797,12 +1865,14 @@ def vander(x1, N=None, increasing=False): [ 4, 2, 1], [ 9, 3, 1], [25, 5, 1]]) + >>> x = np.array([1, 2, 3, 5]) >>> np.vander(x) array([[ 1, 1, 1, 1], [ 8, 4, 2, 1], [ 27, 9, 3, 1], [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) array([[ 1, 1, 1, 1], [ 1, 2, 4, 8], @@ -1810,17 +1880,33 @@ def vander(x1, N=None, increasing=False): [ 1, 5, 25, 125]]) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1.ndim != 1: - pass - else: - if N is None: - N = x1.size + x1 = dpnp.asarray( + x1, device=device, usm_type=usm_type, sycl_queue=sycl_queue + ) + + if N is not None and not isinstance(N, int): + raise TypeError("An integer is required, but got {}".format(type(N))) + elif x1.ndim != 1: + raise ValueError("x1 must be a one-dimensional array or sequence.") + else: + if N is None: + N = x1.size - return dpnp_vander(x1_desc, N, increasing).get_pyobj() + _dtype = int if x1.dtype == bool else x1.dtype + m = empty( + (x1.size, N), + dtype=_dtype, + usm_type=x1.usm_type, + sycl_queue=x1.sycl_queue, + ) + tmp = m[:, ::-1] if not increasing else m + dpnp.power( + x1.reshape(-1, 1), + dpnp.arange(N, dtype=_dtype, sycl_queue=x1.sycl_queue), + out=tmp, + ) - return call_origin(numpy.vander, x1, N=N, increasing=increasing) + return m def zeros( diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 653b323c9e1d..cc6f848ae9f5 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -60,6 +60,7 @@ "mean", "median", "min", + "ptp", "nanvar", "std", "var", @@ -692,6 +693,49 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): return out +def ptp( + a, + /, + axis=None, + out=None, + keepdims=False, +): + """ + Range of values (maximum - minimum) along an axis. + + For full documentation refer to :obj:`numpy.ptp`. + + Returns + ------- + ptp : dpnp.ndarray + The range of a given array. + + Limitations + ----------- + Input array is supported as :class:`dpnp.dpnp_array` or :class:`dpctl.tensor.usm_ndarray`. + + Examples + -------- + >>> import dpnp as np + >>> x = np.array([[4, 9, 2, 10],[6, 9, 7, 12]]) + >>> np.ptp(x, axis=1) + array([8, 6]) + + >>> np.ptp(x, axis=0) + array([2, 0, 5, 2]) + + >>> np.ptp(x) + array(10) + + """ + + return dpnp.subtract( + dpnp.max(a, axis=axis, keepdims=keepdims, out=out), + dpnp.min(a, axis=axis, keepdims=keepdims), + out=out, + ) + + def nanvar(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the variance along the specified axis, while ignoring NaNs. 
diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 3b9a6a95de9f..e20654e877a4 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -118,18 +118,6 @@ tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatte tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_copied tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_transposed -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_all -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_all_keepdims -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis0 -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis1 -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis2 -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis_large -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_multiple_axes -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_multiple_axes_keepdims -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_nan -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_nan_imag -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_nan_real - tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_max tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_min tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_1_{order='C', shape=(10, 20)}::test_cub_max @@ -167,11 +155,6 @@ tests/third_party/cupy/creation_tests/test_basic.py::TestBasic::test_ones_like_s tests/third_party/cupy/creation_tests/test_basic.py::TestBasic::test_zeros_like_subok tests/third_party/cupy/creation_tests/test_basic.py::TestBasic::test_zeros_strides -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_construction_from_list -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_construction_from_tuple -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_extraction_from_nested_list -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_extraction_from_nested_tuple - tests/third_party/cupy/creation_tests/test_ranges.py::TestMeshgrid_param_0_{copy=False, indexing='xy', sparse=False}::test_meshgrid0 tests/third_party/cupy/creation_tests/test_ranges.py::TestMeshgrid_param_0_{copy=False, indexing='xy', sparse=False}::test_meshgrid1 tests/third_party/cupy/creation_tests/test_ranges.py::TestMeshgrid_param_0_{copy=False, indexing='xy', sparse=False}::test_meshgrid2 diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index c7820b56af1a..cb35f0643343 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -109,12 +109,6 @@ tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_3_{order='C', shape=(2, 3)}::test_item 
tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_4_{order='F', shape=(2, 3)}::test_item -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_construction -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_construction_from_list -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_construction_from_tuple -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_extraction_from_nested_list -tests/third_party/cupy/creation_tests/test_matrix.py::TestMatrix::test_diag_extraction_from_nested_tuple - tests/third_party/cupy/indexing_tests/test_insert.py::TestFillDiagonal_param_4_{shape=(3, 3), val=(2,), wrap=True}::test_1darray tests/third_party/cupy/indexing_tests/test_insert.py::TestFillDiagonal_param_4_{shape=(3, 3), val=(2,), wrap=True}::test_fill_diagonal tests/third_party/cupy/indexing_tests/test_insert.py::TestFillDiagonal_param_5_{shape=(3, 3), val=(2,), wrap=False}::test_1darray @@ -200,17 +194,7 @@ tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatte tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_copied tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_transposed -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_all -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_all_keepdims -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis0 -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis1 -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis2 -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_axis_large -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_multiple_axes -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_multiple_axes_keepdims -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_nan -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_nan_imag -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestArrayReduction::test_ptp_nan_real + tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_max tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_min diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 7c674d265dad..779e62237a08 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -91,6 +91,7 @@ def test_arange(start, stop, step, dtype): assert_array_equal(exp_array, res_array) +@pytest.mark.parametrize("func", ["diag", "diagflat"]) @pytest.mark.parametrize( "k", [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6], @@ -117,11 +118,40 @@ def test_arange(start, stop, step, dtype): "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", ], ) -def test_diag(v, k): +def test_diag_diagflat(func, v, k): a = numpy.array(v) ia = dpnp.array(a) - expected = numpy.diag(a, k) - result = dpnp.diag(ia, k) + expected = getattr(numpy, func)(a, k) + result = getattr(dpnp, func)(ia, k) + assert_array_equal(expected, result) + + +@pytest.mark.parametrize("func", ["diag", 
"diagflat"]) +def test_diag_diagflat_raise_error(func): + ia = dpnp.array([0, 1, 2, 3, 4]) + with pytest.raises(TypeError): + getattr(dpnp, func)(ia, k=2.0) + + +@pytest.mark.parametrize("func", ["diag", "diagflat"]) +@pytest.mark.parametrize( + "seq", + [ + [0, 1, 2, 3, 4], + (0, 1, 2, 3, 4), + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + ], + ids=[ + "[0, 1, 2, 3, 4]", + "(0, 1, 2, 3, 4)", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + ], +) +def test_diag_diagflat_seq(func, seq): + expected = getattr(numpy, func)(seq) + result = getattr(dpnp, func)(seq) assert_array_equal(expected, result) @@ -426,12 +456,36 @@ def test_triu_size_null(k): @pytest.mark.parametrize("n", [0, 1, 4, None], ids=["0", "1", "4", "None"]) @pytest.mark.parametrize("increase", [True, False], ids=["True", "False"]) def test_vander(array, dtype, n, increase): - create_array = lambda xp: xp.array(array, dtype=dtype) + if dtype in [dpnp.complex64, dpnp.complex128] and array == [0, 3, 5]: + pytest.skip( + "per array API dpnp.power(complex(0,0)), 0) returns nan+nanj while NumPy returns 1+0j" + ) vander_func = lambda xp, x: xp.vander(x, N=n, increasing=increase) a_np = numpy.array(array, dtype=dtype) a_dpnp = dpnp.array(array, dtype=dtype) - assert_array_equal(vander_func(numpy, a_np), vander_func(dpnp, a_dpnp)) + + assert_allclose(vander_func(numpy, a_np), vander_func(dpnp, a_dpnp)) + + +def test_vander_raise_error(): + a = dpnp.array([1, 2, 3, 4]) + with pytest.raises(TypeError): + dpnp.vander(a, N=1.0) + + a = dpnp.array([[1, 2], [3, 4]]) + with pytest.raises(ValueError): + dpnp.vander(a) + + +@pytest.mark.parametrize( + "sequence", + [[1, 2, 3, 4], (1, 2, 3, 4)], + ids=["[1, 2, 3, 4]", "(1, 2, 3, 4)"], +) +def test_vander_seq(sequence): + vander_func = lambda xp, x: xp.vander(x) + assert_allclose(vander_func(numpy, sequence), vander_func(dpnp, sequence)) @pytest.mark.parametrize( diff --git a/tests/test_statistics.py b/tests/test_statistics.py index 2894f24a37bb..fdfea361e6f5 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -1,7 +1,10 @@ import dpctl.tensor as dpt import numpy import pytest -from numpy.testing import assert_allclose +from numpy.testing import ( + assert_allclose, + assert_array_equal, +) import dpnp @@ -190,3 +193,64 @@ def test_cov_1D_rowvar(dtype): a = dpnp.array([[0, 1, 2]], dtype=dtype) b = numpy.array([[0, 1, 2]], dtype=dtype) assert_allclose(numpy.cov(b, rowvar=False), dpnp.cov(a, rowvar=False)) + + +@pytest.mark.parametrize( + "axis", + [None, 0, 1], + ids=["None", "0", "1"], +) +@pytest.mark.parametrize( + "v", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + ], +) +def test_ptp(v, axis): + a = numpy.array(v) + ia = dpnp.array(a) + expected = numpy.ptp(a, axis) + result = dpnp.ptp(ia, axis) + assert_array_equal(expected, result) + + +@pytest.mark.parametrize( + "axis", + [None, 0, 1], + ids=["None", "0", "1"], +) +@pytest.mark.parametrize( + "v", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + ], +) +def 
test_ptp_out(v, axis): + a = numpy.array(v) + ia = dpnp.array(a) + expected = numpy.ptp(a, axis) + result = dpnp.array(numpy.empty_like(expected)) + dpnp.ptp(ia, axis, out=result) + assert_array_equal(expected, result) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 2a4a814b6f74..083616cbf85b 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -141,6 +141,7 @@ def test_empty_like(device_x, device_y): @pytest.mark.parametrize( "func, args, kwargs", [ + pytest.param("diag", ["x0"], {}), pytest.param("full_like", ["x0"], {"fill_value": 5}), pytest.param("geomspace", ["x0[0:3]", "8", "4"], {}), pytest.param("geomspace", ["1", "x0[2:4]", "4"], {}), @@ -151,6 +152,9 @@ def test_empty_like(device_x, device_y): pytest.param("ones_like", ["x0"], {}), pytest.param("tril", ["x0.reshape((2,2))"], {}), pytest.param("triu", ["x0.reshape((2,2))"], {}), + pytest.param("linspace", ["x0", "4", "4"], {}), + pytest.param("linspace", ["1", "x0", "4"], {}), + pytest.param("vander", ["x0"], {}), pytest.param("zeros_like", ["x0"], {}), ], ) @@ -192,15 +196,43 @@ def test_array_creation_follow_device_logspace_base(device): assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) +@pytest.mark.parametrize( + "func, args, kwargs", + [ + pytest.param("diag", ["x0"], {}), + pytest.param("diagflat", ["x0"], {}), + ], +) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_array_creation_follow_device_2d_array(func, args, kwargs, device): + x_orig = numpy.arange(9).reshape(3, 3) + numpy_args = [eval(val, {"x0": x_orig}) for val in args] + y_orig = getattr(numpy, func)(*numpy_args, **kwargs) + + x = dpnp.arange(9, device=device).reshape(3, 3) + dpnp_args = [eval(val, {"x0": x}) for val in args] + + y = getattr(dpnp, func)(*dpnp_args, **kwargs) + assert_allclose(y_orig, y) + assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) + + @pytest.mark.skip("muted until the issue reported by SAT-5969 is resolved") @pytest.mark.parametrize( "func, args, kwargs", [ + pytest.param("diag", ["x0"], {}), + pytest.param("full", ["10", "x0[3]"], {}), pytest.param("full_like", ["x0"], {"fill_value": 5}), pytest.param("ones_like", ["x0"], {}), pytest.param("zeros_like", ["x0"], {}), pytest.param("linspace", ["x0", "4", "4"], {}), pytest.param("linspace", ["1", "x0", "4"], {}), + pytest.param("vander", ["x0"], {}), ], ) @pytest.mark.parametrize( @@ -214,7 +246,7 @@ def test_array_creation_follow_device_logspace_base(device): ids=[device.filter_string for device in valid_devices], ) def test_array_creation_cross_device(func, args, kwargs, device_x, device_y): - if func is "linspace" and is_win_platform(): + if func == "linspace" and is_win_platform(): pytest.skip("CPU driver experiences an instability on Windows.") x_orig = numpy.array([1, 2, 3, 4]) @@ -225,8 +257,52 @@ def test_array_creation_cross_device(func, args, kwargs, device_x, device_y): dpnp_args = [eval(val, {"x0": x}) for val in args] dpnp_kwargs = dict(kwargs) + y = getattr(dpnp, func)(*dpnp_args, **dpnp_kwargs) + assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) + dpnp_kwargs["device"] = device_y + y = getattr(dpnp, func)(*dpnp_args, **dpnp_kwargs) + assert_allclose(y_orig, y) + + assert_sycl_queue_equal(y.sycl_queue, x.to_device(device_y).sycl_queue) + +@pytest.mark.skip("muted until the issue reported by SAT-5969 is resolved") +@pytest.mark.parametrize( + "func, args, kwargs", + [ + pytest.param("diag", ["x0"], {}), + pytest.param("diagflat", ["x0"], {}), + ], +) 
+@pytest.mark.parametrize( + "device_x", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +@pytest.mark.parametrize( + "device_y", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_array_creation_cross_device_2d_array( + func, args, kwargs, device_x, device_y +): + if func == "linspace" and is_win_platform(): + pytest.skip("CPU driver experiences an instability on Windows.") + + x_orig = numpy.arange(9).reshape(3, 3) + numpy_args = [eval(val, {"x0": x_orig}) for val in args] + y_orig = getattr(numpy, func)(*numpy_args, **kwargs) + + x = dpnp.arange(9, device=device_x).reshape(3, 3) + dpnp_args = [eval(val, {"x0": x}) for val in args] + + dpnp_kwargs = dict(kwargs) + y = getattr(dpnp, func)(*dpnp_args, **dpnp_kwargs) + assert_sycl_queue_equal(y.sycl_queue, x.sycl_queue) + + dpnp_kwargs["device"] = device_y y = getattr(dpnp, func)(*dpnp_args, **dpnp_kwargs) assert_allclose(y_orig, y) @@ -295,6 +371,7 @@ def test_meshgrid(device_x, device_y): pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), + pytest.param("ptp", [1.0, 2.0, 4.0, 7.0]), pytest.param( "real", [complex(1.0, 2.0), complex(3.0, 4.0), complex(5.0, 6.0)] ), diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 99a39acae887..fd26d3e1c054 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -140,6 +140,7 @@ def test_coerced_usm_types_power(usm_type_x, usm_type_y): @pytest.mark.parametrize( "func, args", [ + pytest.param("diag", ["x0"]), pytest.param("empty_like", ["x0"]), pytest.param("full", ["10", "x0[3]"]), pytest.param("full_like", ["x0", "4"]), @@ -150,12 +151,13 @@ def test_coerced_usm_types_power(usm_type_x, usm_type_y): pytest.param("logspace", ["x0[0:2]", "8", "4"]), pytest.param("logspace", ["0", "x0[3:5]", "4"]), pytest.param("ones_like", ["x0"]), + pytest.param("vander", ["x0"]), pytest.param("zeros_like", ["x0"]), ], ) @pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) @pytest.mark.parametrize("usm_type_y", list_of_usm_types, ids=list_of_usm_types) -def test_array_creation_from_an_array(func, args, usm_type_x, usm_type_y): +def test_array_creation_from_1d_array(func, args, usm_type_x, usm_type_y): x0 = dp.full(10, 3, usm_type=usm_type_x) new_args = [eval(val, {"x0": x0}) for val in args] @@ -166,6 +168,26 @@ def test_array_creation_from_an_array(func, args, usm_type_x, usm_type_y): assert y.usm_type == usm_type_y +@pytest.mark.parametrize( + "func, args", + [ + pytest.param("diag", ["x0"]), + pytest.param("diagflat", ["x0"]), + ], +) +@pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) +@pytest.mark.parametrize("usm_type_y", list_of_usm_types, ids=list_of_usm_types) +def test_array_creation_from_2d_array(func, args, usm_type_x, usm_type_y): + x0 = dp.arange(25, usm_type=usm_type_x).reshape(5, 5) + new_args = [eval(val, {"x0": x0}) for val in args] + + x = getattr(dp, func)(*new_args) + y = getattr(dp, func)(*new_args, usm_type=usm_type_y) + + assert x.usm_type == usm_type_x + assert y.usm_type == usm_type_y + + @pytest.mark.parametrize( "func, arg, kwargs", [ @@ -374,6 +396,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), pytest.param("proj", [complex(1.0, 2.0), complex(dp.inf, -1.0)]), + pytest.param("ptp", [1.0, 2.0, 4.0, 7.0]), pytest.param( "real", [complex(1.0, 2.0), complex(3.0, 4.0), complex(5.0, 6.0)] ), diff --git 
a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py index 952398575f1d..f22864bfef5b 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py @@ -9,210 +9,222 @@ from tests.third_party.cupy import testing -@pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.gpu +@testing.parameterize( + *testing.product( + { + "order": ("C", "F"), + } + ) +) class TestArrayReduction(unittest.TestCase): @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_all(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) return a.max() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_all_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) return a.max(keepdims=True) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_axis_large(self, xp, dtype): - a = testing.shaped_random((3, 1000), xp, dtype) + a = testing.shaped_random((3, 1000), xp, dtype, order=self.order) return a.max(axis=0) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_axis0(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.max(axis=0) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_axis1(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.max(axis=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_axis2(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.max(axis=2) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_multiple_axes(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.max(axis=(1, 2)) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_multiple_axes_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.max(axis=(1, 2), keepdims=True) @testing.for_float_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_nan(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.max() @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_nan_real(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.max() @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose() + 
@testing.numpy_cupy_allclose(contiguous_check=False) def test_max_nan_imag(self, xp, dtype): - a = xp.array([float("nan") * 1.0j, 1.0j, -1.0j], dtype) + a = xp.array( + [float("nan") * 1.0j, 1.0j, -1.0j], dtype, order=self.order + ) return a.max() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_all(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) return a.min() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_all_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) return a.min(keepdims=True) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_axis_large(self, xp, dtype): - a = testing.shaped_random((3, 1000), xp, dtype) + a = testing.shaped_random((3, 1000), xp, dtype, order=self.order) return a.min(axis=0) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_axis0(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.min(axis=0) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_axis1(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.min(axis=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_axis2(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.min(axis=2) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_multiple_axes(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.min(axis=(1, 2)) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_multiple_axes_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) return a.min(axis=(1, 2), keepdims=True) @testing.for_float_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_nan(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.min() @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_nan_real(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.min() @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_nan_imag(self, xp, dtype): - a = xp.array([float("nan") * 1.0j, 1.0j, -1.0j], dtype) + a = xp.array( + [float("nan") * 1.0j, 1.0j, -1.0j], dtype, order=self.order + ) return a.min() # skip bool: numpy's ptp raises a TypeError on bool inputs 
@testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_all(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) - return a.ptp() + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) + return xp.ptp(a) @testing.with_requires("numpy>=1.15") @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_all_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) - return a.ptp(keepdims=True) + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) + return xp.ptp(a, keepdims=True) @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_axis_large(self, xp, dtype): - a = testing.shaped_random((3, 1000), xp, dtype) - return a.ptp(axis=0) + a = testing.shaped_random((3, 1000), xp, dtype, order=self.order) + return xp.ptp(a, axis=0) @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_axis0(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) - return a.ptp(axis=0) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return xp.ptp(a, axis=0) @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_axis1(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) - return a.ptp(axis=1) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return xp.ptp(a, axis=1) @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_axis2(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) - return a.ptp(axis=2) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return xp.ptp(a, axis=2) @testing.with_requires("numpy>=1.15") @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_multiple_axes(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) - return a.ptp(axis=(1, 2)) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return xp.ptp(a, axis=(1, 2)) @testing.with_requires("numpy>=1.15") @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_multiple_axes_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype) - return a.ptp(axis=(1, 2), keepdims=True) + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return xp.ptp(a, axis=(1, 2), keepdims=True) @testing.for_float_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_nan(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype) - return a.ptp() + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) + return xp.ptp(a) @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def test_ptp_nan_real(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype) - return a.ptp() + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) + return xp.ptp(a) @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(contiguous_check=False) def 
test_ptp_nan_imag(self, xp, dtype): - a = xp.array([float("nan") * 1.0j, 1.0j, -1.0j], dtype) - return a.ptp() + a = xp.array( + [float("nan") * 1.0j, 1.0j, -1.0j], dtype, order=self.order + ) + return xp.ptp(a) @testing.parameterize( diff --git a/tests/third_party/cupy/creation_tests/test_matrix.py b/tests/third_party/cupy/creation_tests/test_matrix.py index 7123989d7c57..2308ecee00cb 100644 --- a/tests/third_party/cupy/creation_tests/test_matrix.py +++ b/tests/third_party/cupy/creation_tests/test_matrix.py @@ -59,19 +59,16 @@ def test_diag_construction_from_tuple(self, xp): self.assertIsInstance(r, xp.ndarray) return r - @pytest.mark.usefixtures("allow_fall_back_on_numpy") def test_diag_scaler(self): for xp in (numpy, cupy): with pytest.raises(ValueError): xp.diag(1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") def test_diag_0dim(self): for xp in (numpy, cupy): with pytest.raises(ValueError): xp.diag(xp.zeros(())) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") def test_diag_3dim(self): for xp in (numpy, cupy): with pytest.raises(ValueError): @@ -92,17 +89,14 @@ def test_diagflat3(self, xp): a = testing.shaped_arange((3, 3), xp) return xp.diagflat(a, -2) - @pytest.mark.skip("Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_diagflat_from_scalar(self, xp): return xp.diagflat(3) - @pytest.mark.skip("Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_diagflat_from_scalar_with_k0(self, xp): return xp.diagflat(3, 0) - @pytest.mark.skip("Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_diagflat_from_scalar_with_k1(self, xp): return xp.diagflat(3, 1) @@ -183,3 +177,41 @@ def test_triu_nega(self, xp, dtype): def test_triu_posi(self, xp, dtype): m = testing.shaped_arange(self.shape, xp, dtype) return xp.triu(m, k=1) + + +@testing.parameterize( + *testing.product({"N": [None, 0, 1, 2, 3], "increasing": [False, True]}) +) +class TestVander(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_allclose(type_check=False) + def test_vander(self, xp, dtype): + a = testing.shaped_arange((3,), xp, dtype=dtype) + return xp.vander(a, N=self.N, increasing=self.increasing) + + @testing.numpy_cupy_allclose() + def test_vander_array_like_list(self, xp): + a = [0, 1, 2, 3, 4, 5, 6] + return xp.vander(a, N=self.N, increasing=self.increasing) + + @testing.numpy_cupy_allclose() + def test_vander_array_like_tuple(self, xp): + a = (0, 1, 2, 3, 4, 5, 6) + return xp.vander(a, N=self.N, increasing=self.increasing) + + def test_vander_scalar(self): + for xp in (numpy, cupy): + with pytest.raises(ValueError): + xp.vander(1, N=self.N, increasing=self.increasing) + + def test_vander_0dim(self): + for xp in (numpy, cupy): + a = xp.zeros(()) + with pytest.raises(ValueError): + xp.vander(a, N=self.N, increasing=self.increasing) + + def test_vander_2dim(self): + for xp in (numpy, cupy): + m = xp.zeros((2, 2)) + with pytest.raises(ValueError): + xp.vander(m, N=self.N, increasing=self.increasing) From 525116a9c056bf797455a6ecac932f5692160299 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Thu, 23 Nov 2023 05:33:42 -0800 Subject: [PATCH 19/38] Require dpcpp compiler 2024.0 and runtime >=2024.0 (#1625) * Require dpcpp compiler 2024.0 and runtime >=2024.0 * fix build error * Pinned compiler and dpctl versions * Rolled back to intel channel for conda build * Power is properly working for complex types with latest dpctl * Keep backward compatibility with dpctl 0.15.0 * Install dpctl version compatible with DPC++ 
2023.2 while collecting coverage --------- Co-authored-by: Anton Volkov Co-authored-by: Anton <100830759+antonwolfy@users.noreply.github.com> --- .github/workflows/build-sphinx.yml | 2 +- .github/workflows/generate_coverage.yaml | 4 ++-- conda-recipe/meta.yaml | 11 +++++++---- tests/test_mathematical.py | 12 ++++++------ 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build-sphinx.yml b/.github/workflows/build-sphinx.yml index 8dc54185f0ae..90efdc044c9e 100644 --- a/.github/workflows/build-sphinx.yml +++ b/.github/workflows/build-sphinx.yml @@ -91,7 +91,7 @@ jobs: - name: Install dpnp dependencies run: | - conda install numpy"<1.24" dpctl">=0.15.0" mkl-devel-dpcpp onedpl-devel tbb-devel dpcpp_linux-64 \ + conda install numpy"<1.24" dpctl">=0.15.1dev2" mkl-devel-dpcpp onedpl-devel tbb-devel dpcpp_linux-64 \ cmake cython pytest ninja scikit-build sysroot_linux-64">=2.28" ${{ env.CHANNELS }} - name: Install cuPy dependencies diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage.yaml index f52d2dcb6e10..9d6b2e511f4b 100644 --- a/.github/workflows/generate_coverage.yaml +++ b/.github/workflows/generate_coverage.yaml @@ -15,7 +15,7 @@ jobs: env: python-ver: '3.10' - CHANNELS: '-c dppy/label/dev -c intel -c conda-forge --override-channels' + CHANNELS: '-c dppy/label/coverage -c intel -c conda-forge --override-channels' steps: - name: Cancel Previous Runs @@ -44,7 +44,7 @@ jobs: run: | # use DPC++ compiler 2023.2 to work around an issue with crash conda install cython llvm cmake">=3.21" scikit-build ninja pytest pytest-cov coverage[toml] \ - dpctl dpcpp_linux-64"=2023.2" sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel"=2021.10" \ + dpctl">=0.15.1dev2" dpcpp_linux-64"=2023.2" sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel"=2021.10" \ onedpl-devel ${{ env.CHANNELS }} - name: Conda info diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index dd77870dee56..874f02bbae30 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,3 +1,6 @@ +{% set required_compiler_and_mkl_version = "2024.0" %} +{% set required_dpctl_version = "0.15.1dev2" %} + package: name: dpnp version: {{ GIT_DESCRIBE_TAG }} @@ -13,19 +16,19 @@ requirements: - cmake >=3.21 - ninja - git - - dpctl >=0.15.0 - - mkl-devel-dpcpp {{ environ.get('MKL_VER', '>=2023.2.0') }} + - dpctl >={{ required_dpctl_version }} + - mkl-devel-dpcpp >={{ required_compiler_and_mkl_version }} - onedpl-devel - tbb-devel - wheel - scikit-build build: - {{ compiler('cxx') }} - - {{ compiler('dpcpp') }} >=2023.2.0 # [not osx] + - {{ compiler('dpcpp') }} >={{ required_compiler_and_mkl_version }} # [not osx] - sysroot_linux-64 >=2.28 # [linux] run: - python - - dpctl >=0.15.0 + - {{ pin_compatible('dpctl', min_pin='x.x.x', max_pin=None) }} - {{ pin_compatible('dpcpp-cpp-rt', min_pin='x.x', max_pin='x') }} - {{ pin_compatible('mkl-dpcpp', min_pin='x.x', max_pin='x') }} - {{ pin_compatible('numpy', min_pin='x.x', max_pin='x') }} diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 89a09a7dc294..b0c8caff3ad9 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1876,13 +1876,13 @@ def test_complex_values(self): dp_arr = dpnp.array(np_arr) func = lambda x: x**2 - # Linux: ((inf + 0j) ** 2) == (Inf + NaNj) in dpnp and == (NaN + NaNj) in numpy - # Win: ((inf + 0j) ** 2) == (Inf + 0j) in dpnp and == (Inf + NaNj) in numpy + # TODO: unmute the test once it's available if is_win_platform(): - assert_equal(func(dp_arr)[5], 
numpy.inf) - else: - assert_equal(func(dp_arr)[5], (numpy.inf + 0j) * 1) - assert_allclose(func(np_arr)[:5], func(dp_arr).asnumpy()[:5], rtol=1e-6) + pytest.skip( + "Until the latest dpctl is available on internal channel" + ) + + assert_dtype_allclose(func(dp_arr), func(np_arr)) @pytest.mark.parametrize("val", [0, 1], ids=["0", "1"]) @pytest.mark.parametrize("dtype", [dpnp.int32, dpnp.int64]) From 7d0815f8aad228cf99d14e53e261840cc93eb2ea Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Thu, 23 Nov 2023 12:53:06 -0600 Subject: [PATCH 20/38] implement dpnp.argmin and dpnp.argmax using dpctl.tensor (#1610) * rework implementation of diag, diagflat, vander, and ptp * address comments - first round cherry-pick * address comments - second round * add tests for negative use cases to improve covergae * fixed missing merge conflicts * fix pre-commit * implement dpnp.argmin and dpnp.argmax using dpctl.tensor * address comments * add tests for negative use cases to improve coverage * remove unneccessary parts with updates in dpctl #1465 * add paramater section in doc * update ndarray.argmin and ndarray.argmax function signature * use a utility func for returning output * add tests for ndarray implementation * Place new function acc to lexicographical order --------- Co-authored-by: Anton Volkov Co-authored-by: Anton <100830759+antonwolfy@users.noreply.github.com> --- dpnp/backend/include/dpnp_iface_fptr.hpp | 4 - dpnp/backend/kernels/dpnp_krnl_searching.cpp | 50 ------ dpnp/dpnp_algo/CMakeLists.txt | 1 - dpnp/dpnp_algo/dpnp_algo.pxd | 10 -- dpnp/dpnp_algo/dpnp_algo.pyx | 1 - dpnp/dpnp_algo/dpnp_algo_searching.pxi | 119 -------------- dpnp/dpnp_array.py | 47 +----- dpnp/dpnp_iface.py | 46 ++++++ dpnp/dpnp_iface_mathematical.py | 21 +-- dpnp/dpnp_iface_searching.py | 152 ++++++++++++------ dpnp/dpnp_iface_statistics.py | 83 ++-------- tests/skipped_tests.tbl | 2 - tests/skipped_tests_gpu.tbl | 2 - tests/test_search.py | 76 +++++++++ tests/test_sycl_queue.py | 2 + tests/test_usm_type.py | 2 + .../cupy/core_tests/test_ndarray_reduction.py | 102 +++++++++++- .../cupy/sorting_tests/test_search.py | 18 +-- 18 files changed, 346 insertions(+), 392 deletions(-) delete mode 100644 dpnp/dpnp_algo/dpnp_algo_searching.pxi create mode 100644 tests/test_search.py diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index e7cdc0d6cea0..40f161cd9262 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -76,11 +76,7 @@ enum class DPNPFuncName : size_t DPNP_FN_ARCTAN2, /**< Used in numpy.arctan2() impl */ DPNP_FN_ARCTANH, /**< Used in numpy.arctanh() impl */ DPNP_FN_ARGMAX, /**< Used in numpy.argmax() impl */ - DPNP_FN_ARGMAX_EXT, /**< Used in numpy.argmax() impl, requires extra - parameters */ DPNP_FN_ARGMIN, /**< Used in numpy.argmin() impl */ - DPNP_FN_ARGMIN_EXT, /**< Used in numpy.argmin() impl, requires extra - parameters */ DPNP_FN_ARGSORT, /**< Used in numpy.argsort() impl */ DPNP_FN_ARGSORT_EXT, /**< Used in numpy.argsort() impl, requires extra parameters */ diff --git a/dpnp/backend/kernels/dpnp_krnl_searching.cpp b/dpnp/backend/kernels/dpnp_krnl_searching.cpp index ae08e9c4bf53..045d405056c5 100644 --- a/dpnp/backend/kernels/dpnp_krnl_searching.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_searching.cpp @@ -78,14 +78,6 @@ void (*dpnp_argmax_default_c)(void *, void *, size_t) = dpnp_argmax_c<_DataType, _idx_DataType>; -template -DPCTLSyclEventRef 
(*dpnp_argmax_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - size_t, - const DPCTLEventVectorRef) = - dpnp_argmax_c<_DataType, _idx_DataType>; - template class dpnp_argmin_c_kernel; @@ -133,14 +125,6 @@ void (*dpnp_argmin_default_c)(void *, void *, size_t) = dpnp_argmin_c<_DataType, _idx_DataType>; -template -DPCTLSyclEventRef (*dpnp_argmin_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - size_t, - const DPCTLEventVectorRef) = - dpnp_argmin_c<_DataType, _idx_DataType>; - void func_map_init_searching(func_map_t &fmap) { fmap[DPNPFuncName::DPNP_FN_ARGMAX][eft_INT][eft_INT] = { @@ -160,23 +144,6 @@ void func_map_init_searching(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_ARGMAX][eft_DBL][eft_LNG] = { eft_LNG, (void *)dpnp_argmax_default_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_INT][eft_LNG] = { - eft_LNG, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_LNG][eft_INT] = { - eft_INT, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_FLT][eft_INT] = { - eft_INT, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_FLT][eft_LNG] = { - eft_LNG, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_DBL][eft_INT] = { - eft_INT, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMAX_EXT][eft_DBL][eft_LNG] = { - eft_LNG, (void *)dpnp_argmax_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN][eft_INT][eft_INT] = { eft_INT, (void *)dpnp_argmin_default_c}; fmap[DPNPFuncName::DPNP_FN_ARGMIN][eft_INT][eft_LNG] = { @@ -194,22 +161,5 @@ void func_map_init_searching(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_ARGMIN][eft_DBL][eft_LNG] = { eft_LNG, (void *)dpnp_argmin_default_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_INT][eft_LNG] = { - eft_LNG, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_LNG][eft_INT] = { - eft_INT, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_FLT][eft_INT] = { - eft_INT, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_FLT][eft_LNG] = { - eft_LNG, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_DBL][eft_INT] = { - eft_INT, (void *)dpnp_argmin_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ARGMIN_EXT][eft_DBL][eft_LNG] = { - eft_LNG, (void *)dpnp_argmin_ext_c}; - return; } diff --git a/dpnp/dpnp_algo/CMakeLists.txt b/dpnp/dpnp_algo/CMakeLists.txt index e077ee85e157..442fa8e82b16 100644 --- a/dpnp/dpnp_algo/CMakeLists.txt +++ b/dpnp/dpnp_algo/CMakeLists.txt @@ -6,7 +6,6 @@ set(dpnp_algo_pyx_deps ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_sorting.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_arraycreation.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_mathematical.pxi - ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_searching.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_indexing.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_logic.pxi ${CMAKE_CURRENT_SOURCE_DIR}/dpnp_algo_special.pxi diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index d6d25fef1b95..2ec000ad573f 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -36,10 +36,6 @@ cdef extern from "dpnp_iface_fptr.hpp" 
namespace "DPNPFuncName": # need this na DPNP_FN_ALLCLOSE DPNP_FN_ALLCLOSE_EXT DPNP_FN_ARANGE - DPNP_FN_ARGMAX - DPNP_FN_ARGMAX_EXT - DPNP_FN_ARGMIN - DPNP_FN_ARGMIN_EXT DPNP_FN_ARGSORT DPNP_FN_ARGSORT_EXT DPNP_FN_CBRT @@ -355,12 +351,6 @@ Sorting functions cpdef dpnp_descriptor dpnp_argsort(dpnp_descriptor array1) cpdef dpnp_descriptor dpnp_sort(dpnp_descriptor array1) -""" -Searching functions -""" -cpdef dpnp_descriptor dpnp_argmax(dpnp_descriptor array1) -cpdef dpnp_descriptor dpnp_argmin(dpnp_descriptor array1) - """ Trigonometric functions """ diff --git a/dpnp/dpnp_algo/dpnp_algo.pyx b/dpnp/dpnp_algo/dpnp_algo.pyx index 74a8b8e89c57..257c502bfa0c 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pyx +++ b/dpnp/dpnp_algo/dpnp_algo.pyx @@ -63,7 +63,6 @@ include "dpnp_algo_indexing.pxi" include "dpnp_algo_linearalgebra.pxi" include "dpnp_algo_logic.pxi" include "dpnp_algo_mathematical.pxi" -include "dpnp_algo_searching.pxi" include "dpnp_algo_sorting.pxi" include "dpnp_algo_special.pxi" include "dpnp_algo_statistics.pxi" diff --git a/dpnp/dpnp_algo/dpnp_algo_searching.pxi b/dpnp/dpnp_algo/dpnp_algo_searching.pxi deleted file mode 100644 index a84c918f3c23..000000000000 --- a/dpnp/dpnp_algo/dpnp_algo_searching.pxi +++ /dev/null @@ -1,119 +0,0 @@ -# cython: language_level=3 -# cython: linetrace=True -# -*- coding: utf-8 -*- -# ***************************************************************************** -# Copyright (c) 2016-2023, Intel Corporation -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# - Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# - Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -# THE POSSIBILITY OF SUCH DAMAGE. -# ***************************************************************************** - -"""Module Backend (Searching part) - -This module contains interface functions between C backend layer -and the rest of the library - -""" - -# NO IMPORTs here. 
All imports must be placed into main "dpnp_algo.pyx" file - -__all__ += [ - "dpnp_argmax", - "dpnp_argmin" -] - - -# C function pointer to the C library template functions -ctypedef c_dpctl.DPCTLSyclEventRef(*custom_search_1in_1out_func_ptr_t)(c_dpctl.DPCTLSyclQueueRef, - void * , void * , size_t, - const c_dpctl.DPCTLEventVectorRef) - - -cpdef utils.dpnp_descriptor dpnp_argmax(utils.dpnp_descriptor in_array1): - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(in_array1.dtype) - cdef DPNPFuncType output_type = dpnp_dtype_to_DPNPFuncType(dpnp.int64) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_ARGMAX_EXT, param1_type, output_type) - - in_array1_obj = in_array1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (1,) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=in_array1_obj.sycl_device, - usm_type=in_array1_obj.usm_type, - sycl_queue=in_array1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_search_1in_1out_func_ptr_t func = kernel_data.ptr - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - in_array1.get_data(), - result.get_data(), - in_array1.size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - -cpdef utils.dpnp_descriptor dpnp_argmin(utils.dpnp_descriptor in_array1): - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(in_array1.dtype) - cdef DPNPFuncType output_type = dpnp_dtype_to_DPNPFuncType(dpnp.int64) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_ARGMIN_EXT, param1_type, output_type) - - in_array1_obj = in_array1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (1,) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=in_array1_obj.sycl_device, - usm_type=in_array1_obj.usm_type, - sycl_queue=in_array1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_search_1in_1out_func_ptr_t func = kernel_data.ptr - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - in_array1.get_data(), - result.get_data(), - in_array1.size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index 2d359b4a6253..88db5d695f9d 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -486,58 +486,23 @@ def any(self, axis=None, out=None, keepdims=False, *, where=True): self, axis=axis, out=out, keepdims=keepdims, where=where ) - def argmax(self, axis=None, out=None): + def argmax(self, axis=None, out=None, *, keepdims=False): """ Returns array of indices of the maximum values along the given axis. - Parameters - ---------- - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. 
- - Returns - ------- - index_array : {integer_array} - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a.argmax() - 5 - >>> a.argmax(0) - array([1, 1, 1]) - >>> a.argmax(1) - array([2, 2]) + Refer to :obj:`dpnp.argmax` for full documentation. """ - return dpnp.argmax(self, axis, out) + return dpnp.argmax(self, axis, out, keepdims=keepdims) - def argmin(self, axis=None, out=None): + def argmin(self, axis=None, out=None, *, keepdims=False): """ Return array of indices to the minimum values along the given axis. - Parameters - ---------- - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - Returns - ------- - ndarray or scalar - If multi-dimension input, returns a new ndarray of indices to the - minimum values along the given axis. Otherwise, returns a scalar - of index to the minimum values along the given axis. + Refer to :obj:`dpnp.argmin` for full documentation. """ - return dpnp.argmin(self, axis, out) + return dpnp.argmin(self, axis, out, keepdims=keepdims) # 'argpartition', diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index d6d5b3a48615..e91a9b991f89 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -65,6 +65,7 @@ "get_dpnp_descriptor", "get_include", "get_normalized_queue_device", + "get_result_array", "get_usm_ndarray", "get_usm_ndarray_or_scalar", "is_supported_array_or_scalar", @@ -418,6 +419,51 @@ def get_normalized_queue_device(obj=None, device=None, sycl_queue=None): ) +def get_result_array(a, out=None): + """ + If `out` is provided, value of `a` array will be copied into the + `out` array according to ``safe`` casting rule. + Otherwise, the input array `a` is returned. + + Parameters + ---------- + a : {dpnp_array} + Input array. + + out : {dpnp_array, usm_ndarray} + If provided, value of `a` array will be copied into it + according to ``safe`` casting rule. + It should be of the appropriate shape. + + Returns + ------- + out : {dpnp_array} + Return `out` if provided, otherwise return `a`. + + """ + + if out is None: + return a + else: + if out.shape != a.shape: + raise ValueError( + f"Output array of shape {a.shape} is needed, got {out.shape}." + ) + elif not isinstance(out, dpnp_array): + if isinstance(out, dpt.usm_ndarray): + out = dpnp_array._create_from_usm_ndarray(out) + else: + raise TypeError( + "Output array must be any of supported type, but got {}".format( + type(out) + ) + ) + + dpnp.copyto(out, a, casting="safe") + + return out + + def get_usm_ndarray(a): """ Return :class:`dpctl.tensor.usm_ndarray` from input array `a`. diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 330179c2ca44..d9e0155ca118 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -2158,26 +2158,7 @@ def prod( dpt.prod(dpt_array, axis=axis, dtype=dtype, keepdims=keepdims) ) - if out is None: - return result - else: - if out.shape != result.shape: - raise ValueError( - f"Output array of shape {result.shape} is needed, got {out.shape}." 
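A minimal usage sketch of the new dpnp.get_result_array helper added above (illustrative only; the array values, shapes and the use of dpnp.prod here are assumptions, not taken from the patch). It shows the pattern the reworked reductions such as dpnp.prod, dpnp.max and dpnp.min now share: compute into a fresh dpnp array, then let the helper copy the result into a caller-supplied `out` under "safe" casting and return it.

import dpnp

a = dpnp.arange(6.0).reshape(2, 3)
out = dpnp.empty(3, dtype=a.dtype)
# dpnp.prod finishes with `return dpnp.get_result_array(result, out)`,
# so the computed values land in `out` and `out` itself is returned.
res = dpnp.prod(a, axis=0, out=out)   # out now holds [0., 4., 10.]
assert res is out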
- ) - elif not isinstance(out, dpnp_array): - if isinstance(out, dpt.usm_ndarray): - out = dpnp_array._create_from_usm_ndarray(out) - else: - raise TypeError( - "Output array must be any of supported type, but got {}".format( - type(out) - ) - ) - - dpnp.copyto(out, result, casting="safe") - - return out + return dpnp.get_result_array(result, out) def proj( diff --git a/dpnp/dpnp_iface_searching.py b/dpnp/dpnp_iface_searching.py index 26fc1528a0fa..e74c0c1beccf 100644 --- a/dpnp/dpnp_iface_searching.py +++ b/dpnp/dpnp_iface_searching.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -51,22 +49,38 @@ __all__ = ["argmax", "argmin", "searchsorted", "where"] -def argmax(x1, axis=None, out=None): +def argmax(a, axis=None, out=None, *, keepdims=False): """ Returns the indices of the maximum values along an axis. For full documentation refer to :obj:`numpy.argmax`. - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Otherwise the function will be executed sequentially on CPU. - Parameter `axis` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + out : dpnp.ndarray + Indices of maximum value of `a`. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. See Also -------- + :obj:`dpnp.ndarray.argmax` : Equivalent function. :obj:`dpnp.argmin` : Returns the indices of the minimum values along an axis. :obj:`dpnp.amax` : The maximum value along a given axis. :obj:`dpnp.unravel_index` : Convert a flat index into an index tuple. @@ -82,46 +96,71 @@ def argmax(x1, axis=None, out=None): -------- >>> import dpnp as np >>> a = np.arange(6).reshape((2, 3)) + 10 - >>> a.shape - (2, 3) - >>> [i for i in a] - [10, 11, 12, 13, 14, 15] + >>> a + array([[10, 11, 12], + [13, 14, 15]]) >>> np.argmax(a) - 5 + array(5) - """ + >>> np.argmax(a, axis=0) + array([1, 1, 1]) + >>> np.argmax(a, axis=1) + array([2, 2]) + + >>> b = np.arange(6) + >>> b[1] = 5 + >>> b + array([0, 5, 2, 3, 4, 5]) + >>> np.argmax(b) # Only the first occurrence is returned. 
+ array(1) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if axis is not None: - pass - elif out is not None: - pass - else: - result_obj = dpnp_argmax(x1_desc).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmax(x, axis=1, keepdims=True) # Setting keepdims to True + >>> res.shape + (2, 1, 4) + + """ - return result + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.argmax(dpt_array, axis=axis, keepdims=keepdims) + ) - return call_origin(numpy.argmax, x1, axis, out) + return dpnp.get_result_array(result, out) -def argmin(x1, axis=None, out=None): +def argmin(a, axis=None, out=None, *, keepdims=False): """ Returns the indices of the minimum values along an axis. For full documentation refer to :obj:`numpy.argmin`. - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Otherwise the function will be executed sequentially on CPU. - Parameter `axis` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + out : dpnp.ndarray + Indices of minimum value of `a`. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. See Also -------- + :obj:`dpnp.ndarray.argmin` : Equivalent function. :obj:`dpnp.argmax` : Returns the indices of the maximum values along an axis. :obj:`dpnp.amin` : The minimum value along a given axis. :obj:`dpnp.unravel_index` : Convert a flat index into an index tuple. @@ -137,28 +176,37 @@ def argmin(x1, axis=None, out=None): -------- >>> import dpnp as np >>> a = np.arange(6).reshape((2, 3)) + 10 - >>> a.shape - (2, 3) - >>> [i for i in a] - [10, 11, 12, 13, 14, 15] + >>> a + array([[10, 11, 12], + [13, 14, 15]]) >>> np.argmin(a) - 0 + array(0) - """ + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + >>> b = np.arange(6) + 10 + >>> b[4] = 10 + >>> b + array([10, 11, 12, 13, 10, 15]) + >>> np.argmin(b) # Only the first occurrence is returned. 
+ array(0) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if axis is not None: - pass - elif out is not None: - pass - else: - result_obj = dpnp_argmin(x1_desc).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmin(x, axis=1, keepdims=True) # Setting keepdims to True + >>> res.shape + (2, 1, 4) + + """ - return result + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.argmin(dpt_array, axis=axis, keepdims=keepdims) + ) - return call_origin(numpy.argmin, x1, axis, out) + return dpnp.get_result_array(result, out) def searchsorted(a, v, side="left", sorter=None): diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index cc6f848ae9f5..cc11aeede1a5 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -410,43 +410,11 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): ) else: dpt_array = dpnp.get_usm_ndarray(a) - if dpt_array.size == 0: - # TODO: get rid of this if condition when dpctl supports it - axis = (axis,) if isinstance(axis, int) else axis - for i in range(a.ndim): - if a.shape[i] == 0: - if axis is None or i in axis: - raise ValueError( - "reduction does not support zero-size arrays" - ) - else: - indices = [i for i in range(a.ndim) if i not in axis] - res_shape = tuple([a.shape[i] for i in indices]) - result = dpnp.empty(res_shape, dtype=a.dtype) - else: - result = dpnp_array._create_from_usm_ndarray( - dpt.max(dpt_array, axis=axis, keepdims=keepdims) - ) - if out is None: - return result - else: - if out.shape != result.shape: - raise ValueError( - f"Output array of shape {result.shape} is needed, got {out.shape}." - ) - elif not isinstance(out, dpnp_array): - if isinstance(out, dpt.usm_ndarray): - out = dpnp_array._create_from_usm_ndarray(out) - else: - raise TypeError( - "Output array must be any of supported type, but got {}".format( - type(out) - ) - ) - - dpnp.copyto(out, result, casting="safe") + result = dpnp_array._create_from_usm_ndarray( + dpt.max(dpt_array, axis=axis, keepdims=keepdims) + ) - return out + return dpnp.get_result_array(result, out) def mean(x, /, *, axis=None, dtype=None, keepdims=False, out=None, where=True): @@ -651,46 +619,15 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default values." + "where keyword argument is only supported by its default value." ) else: dpt_array = dpnp.get_usm_ndarray(a) - if dpt_array.size == 0: - # TODO: get rid of this if condition when dpctl supports it - for i in range(a.ndim): - if a.shape[i] == 0: - if axis is None or i in axis: - raise ValueError( - "reduction does not support zero-size arrays" - ) - else: - indices = [i for i in range(a.ndim) if i not in axis] - res_shape = tuple([a.shape[i] for i in indices]) - result = dpnp.empty(res_shape, dtype=a.dtype) - else: - result = dpnp_array._create_from_usm_ndarray( - dpt.min(dpt_array, axis=axis, keepdims=keepdims) - ) - if out is None: - return result - else: - if out.shape != result.shape: - raise ValueError( - f"Output array of shape {result.shape} is needed, got {out.shape}." 
- ) - elif not isinstance(out, dpnp_array): - if isinstance(out, dpt.usm_ndarray): - out = dpnp_array._create_from_usm_ndarray(out) - else: - raise TypeError( - "Output array must be any of supported type, but got {}".format( - type(out) - ) - ) - - dpnp.copyto(out, result, casting="safe") - - return out + result = dpnp_array._create_from_usm_ndarray( + dpt.min(dpt_array, axis=axis, keepdims=keepdims) + ) + + return dpnp.get_result_array(result, out) def ptp( diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index e20654e877a4..05acfa51b740 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -816,8 +816,6 @@ tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargm tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size_axis1 tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_0_{array=array(0)}::test_nonzero tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_1_{array=array(1)}::test_nonzero -tests/third_party/cupy/sorting_tests/test_search.py::TestSearch::test_argmax_zero_size -tests/third_party/cupy/sorting_tests/test_search.py::TestSearch::test_argmin_zero_size tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_axis tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_invalid_axis1 tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_invalid_axis2 diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index cb35f0643343..77c23e454175 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -877,8 +877,6 @@ tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargm tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size_axis1 tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_0_{array=array(0)}::test_nonzero tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_1_{array=array(1)}::test_nonzero -tests/third_party/cupy/sorting_tests/test_search.py::TestSearch::test_argmax_zero_size -tests/third_party/cupy/sorting_tests/test_search.py::TestSearch::test_argmin_zero_size tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_axis tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_invalid_axis1 tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_invalid_axis2 diff --git a/tests/test_search.py b/tests/test_search.py new file mode 100644 index 000000000000..aa5a2c9915c4 --- /dev/null +++ b/tests/test_search.py @@ -0,0 +1,76 @@ +import dpctl.tensor as dpt +import numpy +import pytest +from numpy.testing import assert_allclose + +import dpnp + +from .helper import get_all_dtypes + + +@pytest.mark.parametrize("func", ["argmax", "argmin"]) +@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2]) +@pytest.mark.parametrize("keepdims", [False, True]) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) +def test_argmax_argmin(func, axis, keepdims, dtype): + a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, 
func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["argmax", "argmin"]) +@pytest.mark.parametrize("axis", [None, 0, 1, -1]) +@pytest.mark.parametrize("keepdims", [False, True]) +def test_argmax_argmin_bool(func, axis, keepdims): + a = numpy.arange(2, dtype=dpnp.bool) + a = numpy.tile(a, (2, 2)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["argmax", "argmin"]) +def test_argmax_argmin_out(func): + a = numpy.arange(6).reshape((2, 3)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = dpnp.array(numpy.empty_like(np_res)) + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + assert_allclose(dpnp_res, np_res) + + dpnp_res = dpt.asarray(numpy.empty_like(np_res)) + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + assert_allclose(dpnp_res, np_res) + + dpnp_res = numpy.empty_like(np_res) + with pytest.raises(TypeError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + + dpnp_res = dpnp.array(numpy.empty((2, 3))) + with pytest.raises(ValueError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + + +@pytest.mark.parametrize("axis", [None, 0, 1, -1]) +@pytest.mark.parametrize("keepdims", [False, True]) +def test_ndarray_argmax_argmin(axis, keepdims): + a = numpy.arange(192, dtype="f4").reshape((4, 6, 8)) + ia = dpnp.array(a) + + np_res = a.argmax(axis=axis, keepdims=keepdims) + dpnp_res = ia.argmax(axis=axis, keepdims=keepdims) + assert_allclose(dpnp_res, np_res) + + np_res = a.argmin(axis=axis, keepdims=keepdims) + dpnp_res = ia.argmin(axis=axis, keepdims=keepdims) + assert_allclose(dpnp_res, np_res) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 083616cbf85b..43d89a27cb8e 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -338,6 +338,8 @@ def test_meshgrid(device_x, device_y): pytest.param("arcsinh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("arctan", [-1.0, 0.0, 1.0]), pytest.param("arctanh", [-0.5, 0.0, 0.5]), + pytest.param("argmax", [1.0, 2.0, 4.0, 7.0]), + pytest.param("argmin", [1.0, 2.0, 4.0, 7.0]), pytest.param("ceil", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), pytest.param("conjugate", [[1.0 + 1.0j, 0.0], [0.0, 1.0 + 1.0j]]), pytest.param("copy", [1.0, 2.0, 3.0]), diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index fd26d3e1c054..7d31cb7f25aa 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -372,6 +372,8 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("arcsinh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("arctan", [-1.0, 0.0, 1.0]), pytest.param("arctanh", [-0.5, 0.0, 0.5]), + pytest.param("argmax", [1.0, 2.0, 4.0, 7.0]), + pytest.param("argmin", [1.0, 2.0, 4.0, 7.0]), pytest.param("ceil", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), pytest.param("conjugate", [[1.0 + 1.0j, 0.0], [0.0, 1.0 + 1.0j]]), pytest.param( diff --git a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py index f22864bfef5b..70d654d6a694 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py @@ -226,6 +226,106 @@ def test_ptp_nan_imag(self, xp, dtype): ) return xp.ptp(a) + @testing.for_all_dtypes() + 
@testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_all(self, xp, dtype): + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) + return a.argmax() + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_axis_large(self, xp, dtype): + a = testing.shaped_random((3, 1000), xp, dtype, order=self.order) + return a.argmax(axis=0) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_axis0(self, xp, dtype): + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return a.argmax(axis=0) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_axis1(self, xp, dtype): + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return a.argmax(axis=1) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_axis2(self, xp, dtype): + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return a.argmax(axis=2) + + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_nan(self, xp, dtype): + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) + return a.argmax() + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_nan_real(self, xp, dtype): + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) + return a.argmax() + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmax_nan_imag(self, xp, dtype): + a = xp.array( + [float("nan") * 1.0j, 1.0j, -1.0j], dtype, order=self.order + ) + return a.argmax() + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_all(self, xp, dtype): + a = testing.shaped_random((2, 3), xp, dtype, order=self.order) + return a.argmin() + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_axis_large(self, xp, dtype): + a = testing.shaped_random((3, 1000), xp, dtype, order=self.order) + return a.argmin(axis=0) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_axis0(self, xp, dtype): + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return a.argmin(axis=0) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_axis1(self, xp, dtype): + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return a.argmin(axis=1) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_axis2(self, xp, dtype): + a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) + return a.argmin(axis=2) + + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_nan(self, xp, dtype): + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) + return a.argmin() + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_nan_real(self, xp, dtype): + a = xp.array([float("nan"), 1, -1], dtype, order=self.order) + return a.argmin() + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_argmin_nan_imag(self, xp, dtype): + a = xp.array( + [float("nan") * 1.0j, 1.0j, -1.0j], dtype, order=self.order + ) + return a.argmin() + @testing.parameterize( *testing.product( @@ -263,7 +363,7 @@ 
def test_ptp_nan_imag(self, xp, dtype): ((2, 3, 0), (0, 1, 2)), ], "order": ("C", "F"), - "func": ("min", "max"), + "func": ("min", "max", "argmin", "argmax"), } ) ) diff --git a/tests/third_party/cupy/sorting_tests/test_search.py b/tests/third_party/cupy/sorting_tests/test_search.py index 1a917367266d..edfe4ea02ed1 100644 --- a/tests/third_party/cupy/sorting_tests/test_search.py +++ b/tests/third_party/cupy/sorting_tests/test_search.py @@ -29,35 +29,30 @@ def test_argmax_nan(self, xp, dtype): a = xp.array([float("nan"), -1, 1], dtype) return a.argmax() - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_axis_large(self, xp, dtype): a = testing.shaped_random((3, 1000), xp, dtype) return a.argmax(axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_external_argmax_axis_large(self, xp, dtype): a = testing.shaped_random((3, 1000), xp, dtype) return xp.argmax(a, axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_axis0(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return a.argmax(axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_axis1(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return a.argmax(axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_axis2(self, xp, dtype): @@ -77,7 +72,6 @@ def test_argmax_zero_size(self, dtype): with pytest.raises(ValueError): a.argmax() - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) def test_argmax_zero_size_axis0(self, dtype): for xp in (numpy, cupy): @@ -85,7 +79,6 @@ def test_argmax_zero_size_axis0(self, dtype): with pytest.raises(ValueError): a.argmax(axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_zero_size_axis1(self, xp, dtype): @@ -98,8 +91,8 @@ def test_argmin_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return a.argmin() - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_argmin_nan(self, xp, dtype): a = xp.array([float("nan"), -1, 1], dtype) return a.argmin() @@ -110,35 +103,30 @@ def test_external_argmin_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return xp.argmin(a) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmin_axis_large(self, xp, dtype): a = testing.shaped_random((3, 1000), xp, dtype) return a.argmin(axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_external_argmin_axis_large(self, xp, dtype): a = testing.shaped_random((3, 1000), xp, dtype) return xp.argmin(a, axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmin_axis0(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return a.argmin(axis=0) - 
@pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmin_axis1(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return a.argmin(axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmin_axis2(self, xp, dtype): @@ -158,7 +146,6 @@ def test_argmin_zero_size(self, dtype): with pytest.raises(ValueError): return a.argmin() - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) def test_argmin_zero_size_axis0(self, dtype): for xp in (numpy, cupy): @@ -166,7 +153,6 @@ def test_argmin_zero_size_axis0(self, dtype): with pytest.raises(ValueError): a.argmin(axis=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmin_zero_size_axis1(self, xp, dtype): From 0604f18b2942ed06d8201a6c9bc895e8a0f10815 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Thu, 23 Nov 2023 17:43:55 -0600 Subject: [PATCH 21/38] remove skip test (#1629) --- dpnp/dpnp_iface_manipulation.py | 2 +- tests/third_party/cupy/statistics_tests/test_meanvar.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 6941af2829e1..b805a3a906e9 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -1049,7 +1049,7 @@ def repeat(a, repeats, axis=None): >>> np.repeat(x, 4) array([3, 3, 3, 3]) - >>> x = np.array([[1,2], [3,4]]) + >>> x = np.array([[1, 2], [3, 4]]) >>> np.repeat(x, 2) array([1, 1, 2, 2, 3, 3, 4, 4]) >>> np.repeat(x, 3, axis=1) diff --git a/tests/third_party/cupy/statistics_tests/test_meanvar.py b/tests/third_party/cupy/statistics_tests/test_meanvar.py index f7689a46393a..738057a99f49 100644 --- a/tests/third_party/cupy/statistics_tests/test_meanvar.py +++ b/tests/third_party/cupy/statistics_tests/test_meanvar.py @@ -181,8 +181,6 @@ def test_external_mean_axis(self, xp, dtype): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-06) def test_mean_all_float32_dtype(self, xp, dtype): - if dtype == xp.int32: - pytest.skip("skip until issue #1468 is solved in dpctl") a = xp.full((2, 3, 4), 123456789, dtype=dtype) return xp.mean(a, dtype=numpy.float32) From 780d686f31fdb12117a333421517f47f27a992b0 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:45:16 -0600 Subject: [PATCH 22/38] implement dpnp.cbrt, dpnp.exp2, dpnp.copysign, dpnp.rsqrt (#1624) * implement dpnp.cbrt, dpnp.exp2, dpnp.copysign, dpnp.rsqrt * address comments * address comments - 2nd round --- dpnp/backend/extensions/vm/cbrt.hpp | 79 +++ dpnp/backend/extensions/vm/exp2.hpp | 79 +++ dpnp/backend/extensions/vm/types_matrix.hpp | 30 + dpnp/backend/extensions/vm/vm_py.cpp | 60 ++ dpnp/backend/include/dpnp_iface_fptr.hpp | 16 +- dpnp/backend/kernels/dpnp_krnl_elemwise.cpp | 43 -- dpnp/dpnp_algo/dpnp_algo.pxd | 8 - dpnp/dpnp_algo/dpnp_algo_mathematical.pxi | 9 - dpnp/dpnp_algo/dpnp_algo_trigonometric.pxi | 10 - dpnp/dpnp_algo/dpnp_elementwise_common.py | 655 +++++++++++------- dpnp/dpnp_iface_mathematical.py | 98 ++- dpnp/dpnp_iface_trigonometric.py | 193 +++++- tests/skipped_tests.tbl | 4 - tests/skipped_tests_gpu.tbl | 2 - tests/skipped_tests_gpu_no_fp64.tbl | 8 - tests/test_strides.py | 14 + tests/test_sycl_queue.py | 39 
+- tests/test_umath.py | 210 ++++++ tests/test_usm_type.py | 44 +- .../third_party/cupy/math_tests/test_misc.py | 5 +- 20 files changed, 1135 insertions(+), 471 deletions(-) create mode 100644 dpnp/backend/extensions/vm/cbrt.hpp create mode 100644 dpnp/backend/extensions/vm/exp2.hpp diff --git a/dpnp/backend/extensions/vm/cbrt.hpp b/dpnp/backend/extensions/vm/cbrt.hpp new file mode 100644 index 000000000000..83a44335bcc2 --- /dev/null +++ b/dpnp/backend/extensions/vm/cbrt.hpp @@ -0,0 +1,79 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** + +#pragma once + +#include + +#include "common.hpp" +#include "types_matrix.hpp" + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace vm +{ +template +sycl::event cbrt_contig_impl(sycl::queue exec_q, + const std::int64_t n, + const char *in_a, + char *out_y, + const std::vector &depends) +{ + type_utils::validate_type_for_device(exec_q); + + const T *a = reinterpret_cast(in_a); + using resTy = typename types::CbrtOutputType::value_type; + resTy *y = reinterpret_cast(out_y); + + return mkl_vm::cbrt(exec_q, + n, // number of elements to be calculated + a, // pointer `a` containing input vector of size n + y, // pointer `y` to the output vector of size n + depends); +} + +template +struct CbrtContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v< + typename types::CbrtOutputType::value_type, void>) + { + return nullptr; + } + else { + return cbrt_contig_impl; + } + } +}; +} // namespace vm +} // namespace ext +} // namespace backend +} // namespace dpnp diff --git a/dpnp/backend/extensions/vm/exp2.hpp b/dpnp/backend/extensions/vm/exp2.hpp new file mode 100644 index 000000000000..8f80d0c1d50f --- /dev/null +++ b/dpnp/backend/extensions/vm/exp2.hpp @@ -0,0 +1,79 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** + +#pragma once + +#include + +#include "common.hpp" +#include "types_matrix.hpp" + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace vm +{ +template +sycl::event exp2_contig_impl(sycl::queue exec_q, + const std::int64_t n, + const char *in_a, + char *out_y, + const std::vector &depends) +{ + type_utils::validate_type_for_device(exec_q); + + const T *a = reinterpret_cast(in_a); + using resTy = typename types::Exp2OutputType::value_type; + resTy *y = reinterpret_cast(out_y); + + return mkl_vm::exp2(exec_q, + n, // number of elements to be calculated + a, // pointer `a` containing input vector of size n + y, // pointer `y` to the output vector of size n + depends); +} + +template +struct Exp2ContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v< + typename types::Exp2OutputType::value_type, void>) + { + return nullptr; + } + else { + return exp2_contig_impl; + } + } +}; +} // namespace vm +} // namespace ext +} // namespace backend +} // namespace dpnp diff --git a/dpnp/backend/extensions/vm/types_matrix.hpp b/dpnp/backend/extensions/vm/types_matrix.hpp index 03328f92fb35..0ddc61884e1c 100644 --- a/dpnp/backend/extensions/vm/types_matrix.hpp +++ b/dpnp/backend/extensions/vm/types_matrix.hpp @@ -202,6 +202,21 @@ struct AtanhOutputType dpctl_td_ns::DefaultResultEntry>::result_type; }; +/** + * @brief A factory to define pairs of supported types for which + * MKL VM library provides support in oneapi::mkl::vm::cbrt function. + * + * @tparam T Type of input vector `a` and of result vector `y`. + */ +template +struct CbrtOutputType +{ + using value_type = typename std::disjunction< + dpctl_td_ns::TypeMapResultEntry, + dpctl_td_ns::TypeMapResultEntry, + dpctl_td_ns::DefaultResultEntry>::result_type; +}; + /** * @brief A factory to define pairs of supported types for which * MKL VM library provides support in oneapi::mkl::vm::ceil function. @@ -308,6 +323,21 @@ struct ExpOutputType dpctl_td_ns::DefaultResultEntry>::result_type; }; +/** + * @brief A factory to define pairs of supported types for which + * MKL VM library provides support in oneapi::mkl::vm::exp2 function. 
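For context, a small hedged sketch of the Python-level entry points this commit wires up on top of the VM kernels added here (function names come from the commit title; the inputs and expected values are illustrative assumptions, with NumPy-style element-wise semantics where a NumPy counterpart exists, and dpnp.rsqrt being the reciprocal square root):

import dpnp

dpnp.cbrt(dpnp.array([1.0, 8.0, 27.0]))      # cube root -> [1., 2., 3.]
dpnp.exp2(dpnp.array([0.0, 1.0, 3.0]))       # 2**x -> [1., 2., 8.]
dpnp.copysign(dpnp.array([1.0, -2.0]),
              dpnp.array([-1.0, 1.0]))       # magnitude of x1, sign of x2 -> [-1., 2.]
dpnp.rsqrt(dpnp.array([4.0, 16.0]))          # 1/sqrt(x) -> [0.5, 0.25]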
+ * + * @tparam T Type of input vector `a` and of result vector `y`. + */ +template +struct Exp2OutputType +{ + using value_type = typename std::disjunction< + dpctl_td_ns::TypeMapResultEntry, + dpctl_td_ns::TypeMapResultEntry, + dpctl_td_ns::DefaultResultEntry>::result_type; +}; + /** * @brief A factory to define pairs of supported types for which * MKL VM library provides support in oneapi::mkl::vm::expm1 function. diff --git a/dpnp/backend/extensions/vm/vm_py.cpp b/dpnp/backend/extensions/vm/vm_py.cpp index a7dfce88a7a0..09416b00918b 100644 --- a/dpnp/backend/extensions/vm/vm_py.cpp +++ b/dpnp/backend/extensions/vm/vm_py.cpp @@ -39,6 +39,7 @@ #include "atan.hpp" #include "atan2.hpp" #include "atanh.hpp" +#include "cbrt.hpp" #include "ceil.hpp" #include "common.hpp" #include "conj.hpp" @@ -46,6 +47,7 @@ #include "cosh.hpp" #include "div.hpp" #include "exp.hpp" +#include "exp2.hpp" #include "expm1.hpp" #include "floor.hpp" #include "hypot.hpp" @@ -81,12 +83,14 @@ static unary_impl_fn_ptr_t asinh_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t atan_dispatch_vector[dpctl_td_ns::num_types]; static binary_impl_fn_ptr_t atan2_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t atanh_dispatch_vector[dpctl_td_ns::num_types]; +static unary_impl_fn_ptr_t cbrt_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t ceil_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t conj_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t cos_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t cosh_dispatch_vector[dpctl_td_ns::num_types]; static binary_impl_fn_ptr_t div_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t exp_dispatch_vector[dpctl_td_ns::num_types]; +static unary_impl_fn_ptr_t exp2_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t expm1_dispatch_vector[dpctl_td_ns::num_types]; static unary_impl_fn_ptr_t floor_dispatch_vector[dpctl_td_ns::num_types]; static binary_impl_fn_ptr_t hypot_dispatch_vector[dpctl_td_ns::num_types]; @@ -366,6 +370,34 @@ PYBIND11_MODULE(_vm_impl, m) py::arg("sycl_queue"), py::arg("src"), py::arg("dst")); } + // UnaryUfunc: ==== Cbrt(x) ==== + { + vm_ext::init_ufunc_dispatch_vector( + cbrt_dispatch_vector); + + auto cbrt_pyapi = [&](sycl::queue exec_q, arrayT src, arrayT dst, + const event_vecT &depends = {}) { + return vm_ext::unary_ufunc(exec_q, src, dst, depends, + cbrt_dispatch_vector); + }; + m.def("_cbrt", cbrt_pyapi, + "Call `cbrt` function from OneMKL VM library to compute " + "the element-wise cube root of vector elements", + py::arg("sycl_queue"), py::arg("src"), py::arg("dst"), + py::arg("depends") = py::list()); + + auto cbrt_need_to_call_pyapi = [&](sycl::queue exec_q, arrayT src, + arrayT dst) { + return vm_ext::need_to_call_unary_ufunc(exec_q, src, dst, + cbrt_dispatch_vector); + }; + m.def("_mkl_cbrt_to_call", cbrt_need_to_call_pyapi, + "Check input arguments to answer if `cbrt` function from " + "OneMKL VM library can be used", + py::arg("sycl_queue"), py::arg("src"), py::arg("dst")); + } + // UnaryUfunc: ==== Ceil(x) ==== { vm_ext::init_ufunc_dispatch_vector( + exp2_dispatch_vector); + + auto exp2_pyapi = [&](sycl::queue exec_q, arrayT src, arrayT dst, + const event_vecT &depends = {}) { + return vm_ext::unary_ufunc(exec_q, src, dst, depends, + exp2_dispatch_vector); + }; + m.def("_exp2", exp2_pyapi, + "Call `exp2` function from OneMKL VM library to compute " + "the element-wise base-2 exponential of vector elements", + 
py::arg("sycl_queue"), py::arg("src"), py::arg("dst"), + py::arg("depends") = py::list()); + + auto exp2_need_to_call_pyapi = [&](sycl::queue exec_q, arrayT src, + arrayT dst) { + return vm_ext::need_to_call_unary_ufunc(exec_q, src, dst, + exp2_dispatch_vector); + }; + m.def("_mkl_exp2_to_call", exp2_need_to_call_pyapi, + "Check input arguments to answer if `exp2` function from " + "OneMKL VM library can be used", + py::arg("sycl_queue"), py::arg("src"), py::arg("dst")); + } + // UnaryUfunc: ==== expm1(x) ==== { vm_ext::init_ufunc_dispatch_vector}; - fmap[DPNPFuncName::DPNP_FN_CBRT_EXT][eft_INT][eft_INT] = { - eft_DBL, (void *)dpnp_cbrt_c_ext}; - fmap[DPNPFuncName::DPNP_FN_CBRT_EXT][eft_LNG][eft_LNG] = { - eft_DBL, (void *)dpnp_cbrt_c_ext}; - fmap[DPNPFuncName::DPNP_FN_CBRT_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_cbrt_c_ext}; - fmap[DPNPFuncName::DPNP_FN_CBRT_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_cbrt_c_ext}; - fmap[DPNPFuncName::DPNP_FN_CEIL][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_ceil_c_default}; fmap[DPNPFuncName::DPNP_FN_CEIL][eft_LNG][eft_LNG] = { @@ -438,27 +429,6 @@ static void func_map_init_elemwise_1arg_2type(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_EXP2][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_exp2_c_default}; - fmap[DPNPFuncName::DPNP_FN_EXP2_EXT][eft_INT][eft_INT] = { - get_default_floating_type(), - (void *)dpnp_exp2_c_ext< - int32_t, func_type_map_t::find_type>, - get_default_floating_type(), - (void *)dpnp_exp2_c_ext< - int32_t, func_type_map_t::find_type< - get_default_floating_type()>>}; - fmap[DPNPFuncName::DPNP_FN_EXP2_EXT][eft_LNG][eft_LNG] = { - get_default_floating_type(), - (void *)dpnp_exp2_c_ext< - int64_t, func_type_map_t::find_type>, - get_default_floating_type(), - (void *)dpnp_exp2_c_ext< - int64_t, func_type_map_t::find_type< - get_default_floating_type()>>}; - fmap[DPNPFuncName::DPNP_FN_EXP2_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_exp2_c_ext}; - fmap[DPNPFuncName::DPNP_FN_EXP2_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_exp2_c_ext}; - fmap[DPNPFuncName::DPNP_FN_EXP][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_exp_c_default}; fmap[DPNPFuncName::DPNP_FN_EXP][eft_LNG][eft_LNG] = { @@ -1465,19 +1435,6 @@ static void func_map_elemwise_2arg_3type_core(func_map_t &fmap) template static void func_map_elemwise_2arg_3type_short_core(func_map_t &fmap) { - ((fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][FT1][FTs] = - {get_floating_res_type(), - (void *)dpnp_copysign_c_ext< - func_type_map_t::find_type()>, - func_type_map_t::find_type, - func_type_map_t::find_type>, - get_floating_res_type(), - (void *)dpnp_copysign_c_ext< - func_type_map_t::find_type< - get_floating_res_type()>, - func_type_map_t::find_type, - func_type_map_t::find_type>}), - ...); ((fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][FT1][FTs] = {get_floating_res_type(), (void *) diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 2ec000ad573f..80c6035d7a9f 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -38,16 +38,12 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_ARANGE DPNP_FN_ARGSORT DPNP_FN_ARGSORT_EXT - DPNP_FN_CBRT - DPNP_FN_CBRT_EXT DPNP_FN_CHOLESKY DPNP_FN_CHOLESKY_EXT DPNP_FN_CHOOSE DPNP_FN_CHOOSE_EXT DPNP_FN_COPY DPNP_FN_COPY_EXT - DPNP_FN_COPYSIGN - DPNP_FN_COPYSIGN_EXT DPNP_FN_CORRELATE DPNP_FN_CORRELATE_EXT DPNP_FN_CROSS @@ -76,8 +72,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_ERF_EXT DPNP_FN_EYE 
DPNP_FN_EYE_EXT - DPNP_FN_EXP2 - DPNP_FN_EXP2_EXT DPNP_FN_FABS DPNP_FN_FABS_EXT DPNP_FN_FFT_FFT @@ -354,8 +348,6 @@ cpdef dpnp_descriptor dpnp_sort(dpnp_descriptor array1) """ Trigonometric functions """ -cpdef dpnp_descriptor dpnp_cbrt(dpnp_descriptor array1) cpdef dpnp_descriptor dpnp_degrees(dpnp_descriptor array1) -cpdef dpnp_descriptor dpnp_exp2(dpnp_descriptor array1) cpdef dpnp_descriptor dpnp_radians(dpnp_descriptor array1) cpdef dpnp_descriptor dpnp_recip(dpnp_descriptor array1) diff --git a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi index f9828229b53a..431892f10217 100644 --- a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi @@ -36,7 +36,6 @@ and the rest of the library # NO IMPORTs here. All imports must be placed into main "dpnp_algo.pyx" file __all__ += [ - "dpnp_copysign", "dpnp_cross", "dpnp_cumprod", "dpnp_cumsum", @@ -64,14 +63,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*ftpr_custom_trapz_2in_1out_with_2size_t)(c_d const c_dpctl.DPCTLEventVectorRef) -cpdef utils.dpnp_descriptor dpnp_copysign(utils.dpnp_descriptor x1_obj, - utils.dpnp_descriptor x2_obj, - object dtype=None, - utils.dpnp_descriptor out=None, - object where=True): - return call_fptr_2in_1out_strides(DPNP_FN_COPYSIGN_EXT, x1_obj, x2_obj, dtype, out, where) - - cpdef utils.dpnp_descriptor dpnp_cross(utils.dpnp_descriptor x1_obj, utils.dpnp_descriptor x2_obj, object dtype=None, diff --git a/dpnp/dpnp_algo/dpnp_algo_trigonometric.pxi b/dpnp/dpnp_algo/dpnp_algo_trigonometric.pxi index 099aa9ba7abd..41a29b275577 100644 --- a/dpnp/dpnp_algo/dpnp_algo_trigonometric.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_trigonometric.pxi @@ -36,27 +36,17 @@ and the rest of the library # NO IMPORTs here. All imports must be placed into main "dpnp_algo.pyx" file __all__ += [ - 'dpnp_cbrt', 'dpnp_degrees', - 'dpnp_exp2', 'dpnp_radians', 'dpnp_recip', 'dpnp_unwrap' ] -cpdef utils.dpnp_descriptor dpnp_cbrt(utils.dpnp_descriptor x1): - return call_fptr_1in_1out_strides(DPNP_FN_CBRT_EXT, x1) - - cpdef utils.dpnp_descriptor dpnp_degrees(utils.dpnp_descriptor x1): return call_fptr_1in_1out_strides(DPNP_FN_DEGREES_EXT, x1) -cpdef utils.dpnp_descriptor dpnp_exp2(utils.dpnp_descriptor x1): - return call_fptr_1in_1out_strides(DPNP_FN_EXP2_EXT, x1) - - cpdef utils.dpnp_descriptor dpnp_recip(utils.dpnp_descriptor x1): return call_fptr_1in_1out_strides(DPNP_FN_RECIP_EXT, x1) diff --git a/dpnp/dpnp_algo/dpnp_elementwise_common.py b/dpnp/dpnp_algo/dpnp_elementwise_common.py index 315b266c8032..bd7babbe01d0 100644 --- a/dpnp/dpnp_algo/dpnp_elementwise_common.py +++ b/dpnp/dpnp_algo/dpnp_elementwise_common.py @@ -46,13 +46,16 @@ "dpnp_bitwise_and", "dpnp_bitwise_or", "dpnp_bitwise_xor", + "dpnp_cbrt", "dpnp_ceil", "dpnp_conj", + "dpnp_copysign", "dpnp_cos", "dpnp_cosh", "dpnp_divide", "dpnp_equal", "dpnp_exp", + "dpnp_exp2", "dpnp_expm1", "dpnp_floor", "dpnp_floor_divide", @@ -88,6 +91,7 @@ "dpnp_remainder", "dpnp_right_shift", "dpnp_round", + "dpnp_rsqrt", "dpnp_sign", "dpnp_signbit", "dpnp_sin", @@ -155,16 +159,22 @@ def check_nd_call_func( ) ) return dpnp_func(*x_args, out=out, order=order) - return call_origin( - origin_func, - *x_args, - out=out, - where=where, - order=order, - dtype=dtype, - subok=subok, - **kwargs, - ) + if origin_func is not None: + return call_origin( + origin_func, + *x_args, + out=out, + where=where, + order=order, + dtype=dtype, + subok=subok, + **kwargs, + ) + else: + raise NotImplementedError( + f"Requested function={dpnp_func.__name__} with 
args={x_args} and kwargs={kwargs} " + "isn't currently supported." + ) def _make_unary_func( @@ -231,10 +241,10 @@ def _call_func(src1, src2, dst, sycl_queue, depends=None): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -275,10 +285,10 @@ def dpnp_abs(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -316,10 +326,10 @@ def dpnp_acos(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -361,9 +371,9 @@ def dpnp_acosh(x, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -403,10 +413,10 @@ def dpnp_add(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -444,10 +454,10 @@ def dpnp_asin(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type.. 
+ order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -485,10 +495,10 @@ def dpnp_asinh(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type.. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -533,9 +543,9 @@ def dpnp_atan(x, out=None, order="K"): floating-point data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -576,10 +586,10 @@ def dpnp_atan2(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -623,9 +633,9 @@ def dpnp_atanh(x, out=None, order="K"): type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -667,9 +677,9 @@ def dpnp_bitwise_and(x1, x2, out=None, order="K"): type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -711,9 +721,9 @@ def dpnp_bitwise_or(x1, x2, out=None, order="K"): type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". 
Returns: dpnp.ndarray: @@ -740,6 +750,46 @@ def dpnp_bitwise_xor(x1, x2, out=None, order="K"): return dpnp_array._create_from_usm_ndarray(res_usm) + +_cbrt_docstring = """ +cbrt(x, out=None, order='K') + +Computes the cube root for each element `x_i` for input array `x`. +The cube root of a scalar `x` is the real value `y` such that `y**3 == x`. + +Args: + x (dpnp.ndarray): + Input array, expected to have a real-valued data type. + out ({None, dpnp.ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. + Default: "K". +Return: + dpnp.ndarray: + An array containing the element-wise cube roots of the input array. + The returned array has the same data type as `x`. +""" + +cbrt_func = _make_unary_func( + "cbrt", dpt.cbrt, _cbrt_docstring, vmi._mkl_cbrt_to_call, vmi._cbrt +) + + +def dpnp_cbrt(x, out=None, order="K"): + """ + Invokes cbrt() function from pybind11 extension of OneMKL VM if possible. + + Otherwise fully relies on dpctl.tensor implementation for cbrt() function. + """ + # dpctl.tensor only works with usm_ndarray + x1_usm = dpnp.get_usm_ndarray(x) + out_usm = None if out is None else dpnp.get_usm_ndarray(out) + + res_usm = cbrt_func(x1_usm, out=out_usm, order=order) + return dpnp_array._create_from_usm_ndarray(res_usm) + + _ceil_docstring = """ ceil(x, out=None, order='K') @@ -750,10 +800,10 @@ def dpnp_bitwise_xor(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have a real-valued data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -780,122 +830,163 @@ def dpnp_ceil(x, out=None, order="K"): return dpnp_array._create_from_usm_ndarray(res_usm) -_cos_docstring = """ -cos(x, out=None, order='K') +_conj_docstring = """ +conj(x, out=None, order='K') -Computes cosine for each element `x_i` for input array `x`. +Computes conjugate for each element `x_i` for input array `x`. Args: x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: - An array containing the element-wise cosine. The data type - of the returned array is determined by the Type Promotion Rules. + An array containing the element-wise conjugate. + The returned array has the same data type as `x`.
""" -cos_func = _make_unary_func( - "cos", dpt.cos, _cos_docstring, vmi._mkl_cos_to_call, vmi._cos +conj_func = _make_unary_func( + "conj", dpt.conj, _conj_docstring, vmi._mkl_conj_to_call, vmi._conj ) -def dpnp_cos(x, out=None, order="K"): +def dpnp_conj(x, out=None, order="K"): """ - Invokes cos() function from pybind11 extension of OneMKL VM if possible. - - Otherwise fully relies on dpctl.tensor implementation for cos() function. + Invokes conj() function from pybind11 extension of OneMKL VM if possible. + Otherwise fully relies on dpctl.tensor implementation for conj() function. """ # dpctl.tensor only works with usm_ndarray x1_usm = dpnp.get_usm_ndarray(x) out_usm = None if out is None else dpnp.get_usm_ndarray(out) - res_usm = cos_func(x1_usm, out=out_usm, order=order) + res_usm = conj_func(x1_usm, out=out_usm, order=order) return dpnp_array._create_from_usm_ndarray(res_usm) -_cosh_docstring = """ -cosh(x, out=None, order='K') +_copysign_docstring = """ +copysign(x1, x2, out=None, order='K') -Computes hyperbolic cosine for each element `x_i` for input array `x`. +Composes a floating-point value with the magnitude of `x1_i` and the sign of +`x2_i` for each element of input arrays `x1` and `x2`. + +Args: + x1 (dpnp.ndarray): + First input array, expected to have a real floating-point data type. + x2 (dpnp.ndarray): + Second input array, also expected to have a real floating-point data + type. + out ({None, dpnp.ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. + Default: "K". +Returns: + dpnp.ndarray: + An array containing the element-wise results. The data type + of the returned array is determined by the Type Promotion Rules. +""" + +copysign_func = _make_binary_func("copysign", dpt.copysign, _copysign_docstring) + + +def dpnp_copysign(x1, x2, out=None, order="K"): + """Invokes copysign() from dpctl.tensor implementation for copysign() function.""" + + # dpctl.tensor only works with usm_ndarray or scalar + x1_usm_or_scalar = dpnp.get_usm_ndarray_or_scalar(x1) + x2_usm_or_scalar = dpnp.get_usm_ndarray_or_scalar(x2) + out_usm = None if out is None else dpnp.get_usm_ndarray(out) + + res_usm = copysign_func( + x1_usm_or_scalar, x2_usm_or_scalar, out=out_usm, order=order + ) + return dpnp_array._create_from_usm_ndarray(res_usm) + + +_cos_docstring = """ +cos(x, out=None, order='K') + +Computes cosine for each element `x_i` for input array `x`. Args: x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: - An array containing the element-wise hyperbolic cosine. The data type + An array containing the element-wise cosine. The data type of the returned array is determined by the Type Promotion Rules. 
""" -cosh_func = _make_unary_func( - "cosh", dpt.cosh, _cosh_docstring, vmi._mkl_cosh_to_call, vmi._cosh +cos_func = _make_unary_func( + "cos", dpt.cos, _cos_docstring, vmi._mkl_cos_to_call, vmi._cos ) -def dpnp_cosh(x, out=None, order="K"): +def dpnp_cos(x, out=None, order="K"): """ - Invokes cosh() function from pybind11 extension of OneMKL VM if possible. + Invokes cos() function from pybind11 extension of OneMKL VM if possible. - Otherwise fully relies on dpctl.tensor implementation for cosh() function. + Otherwise fully relies on dpctl.tensor implementation for cos() function. """ # dpctl.tensor only works with usm_ndarray x1_usm = dpnp.get_usm_ndarray(x) out_usm = None if out is None else dpnp.get_usm_ndarray(out) - res_usm = cosh_func(x1_usm, out=out_usm, order=order) + res_usm = cos_func(x1_usm, out=out_usm, order=order) return dpnp_array._create_from_usm_ndarray(res_usm) -_conj_docstring = """ -conj(x, out=None, order='K') +_cosh_docstring = """ +cosh(x, out=None, order='K') -Computes conjugate for each element `x_i` for input array `x`. +Computes hyperbolic cosine for each element `x_i` for input array `x`. Args: x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: - An array containing the element-wise conjugate. - The returned array has the same data type as `x`. + An array containing the element-wise hyperbolic cosine. The data type + of the returned array is determined by the Type Promotion Rules. """ -conj_func = _make_unary_func( - "conj", dpt.conj, _conj_docstring, vmi._mkl_conj_to_call, vmi._conj +cosh_func = _make_unary_func( + "cosh", dpt.cosh, _cosh_docstring, vmi._mkl_cosh_to_call, vmi._cosh ) -def dpnp_conj(x, out=None, order="K"): +def dpnp_cosh(x, out=None, order="K"): """ - Invokes conj() function from pybind11 extension of OneMKL VM if possible. + Invokes cosh() function from pybind11 extension of OneMKL VM if possible. + + Otherwise fully relies on dpctl.tensor implementation for cosh() function. - Otherwise fully relies on dpctl.tensor implementation for conj() function. """ # dpctl.tensor only works with usm_ndarray x1_usm = dpnp.get_usm_ndarray(x) out_usm = None if out is None else dpnp.get_usm_ndarray(out) - res_usm = conj_func(x1_usm, out=out_usm, order=order) + res_usm = cosh_func(x1_usm, out=out_usm, order=order) return dpnp_array._create_from_usm_ndarray(res_usm) @@ -912,9 +1003,9 @@ def dpnp_conj(x, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -960,7 +1051,7 @@ def dpnp_divide(x1, x2, out=None, order="K"): Output array to populate. Array have the correct shape and the expected data type. 
order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -994,10 +1085,10 @@ def dpnp_equal(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1026,6 +1117,47 @@ def dpnp_exp(x, out=None, order="K"): return dpnp_array._create_from_usm_ndarray(res_usm) +_exp2_docstring = """ +exp2(x, out=None, order='K') + +Computes the base-2 exponential for each element `x_i` for input array `x`. + +Args: + x (dpnp.ndarray): + Input array, expected to have a floating-point data type. + out ({None, dpnp.ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. + Default: "K". +Return: + dpnp.ndarray: + An array containing the element-wise base-2 exponentials. + The data type of the returned array is determined by + the Type Promotion Rules. +""" + +exp2_func = _make_unary_func( + "exp2", dpt.exp2, _exp2_docstring, vmi._mkl_exp2_to_call, vmi._exp2 +) + + +def dpnp_exp2(x, out=None, order="K"): + """ + Invokes exp2() function from pybind11 extension of OneMKL VM if possible. + + Otherwise fully relies on dpctl.tensor implementation for exp2() function. + """ + + # dpctl.tensor only works with usm_ndarray + x1_usm = dpnp.get_usm_ndarray(x) + out_usm = None if out is None else dpnp.get_usm_ndarray(out) + + res_usm = exp2_func(x1_usm, out=out_usm, order=order) + return dpnp_array._create_from_usm_ndarray(res_usm) + + _expm1_docstring = """ expm1(x, out=None, order='K') @@ -1037,10 +1169,10 @@ def dpnp_exp(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1079,10 +1211,10 @@ def dpnp_expm1(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have a real-valued data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". 
Return: dpnp.ndarray: @@ -1123,9 +1255,9 @@ def dpnp_floor(x, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1166,9 +1298,9 @@ def dpnp_floor_divide(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1206,9 +1338,9 @@ def dpnp_greater(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1248,9 +1380,9 @@ def dpnp_greater_equal(x1, x2, out=None, order="K"): Second input array, also expected to have a real-valued data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1291,9 +1423,9 @@ def dpnp_hypot(x1, x2, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1329,8 +1461,8 @@ def dpnp_imag(x, out=None, order="K"): out ({None, dpnp.ndarray}, optional): Output array to populate. Array must have the correct shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1363,9 +1495,9 @@ def dpnp_invert(x, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. 
- order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1398,9 +1530,9 @@ def dpnp_isfinite(x, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1432,9 +1564,9 @@ def dpnp_isinf(x, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1471,9 +1603,9 @@ def dpnp_isnan(x, out=None, order="K"): Each element must be greater than or equal to 0. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1513,9 +1645,9 @@ def dpnp_left_shift(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1553,9 +1685,9 @@ def dpnp_less(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1591,10 +1723,10 @@ def dpnp_less_equal(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. 
+ Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1632,10 +1764,10 @@ def dpnp_log(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1673,10 +1805,10 @@ def dpnp_log10(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1713,10 +1845,10 @@ def dpnp_log1p(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1764,9 +1896,9 @@ def dpnp_log2(x, out=None, order="K"): floating-point data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1806,9 +1938,9 @@ def dpnp_logaddexp(x1, x2, out=None, order="K"): Second input array. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1843,10 +1975,10 @@ def dpnp_logical_and(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. 
+ order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -1882,9 +2014,9 @@ def dpnp_logical_not(x, out=None, order="K"): Second input array. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1923,9 +2055,9 @@ def dpnp_logical_or(x1, x2, out=None, order="K"): Second input array. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -1964,9 +2096,9 @@ def dpnp_logical_xor(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2004,9 +2136,9 @@ def dpnp_maximum(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2044,9 +2176,9 @@ def dpnp_minimum(x1, x2, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2091,9 +2223,9 @@ def dpnp_multiply(x1, x2, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". 
Returns: dpnp.ndarray: @@ -2134,9 +2266,9 @@ def dpnp_negative(x, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2173,9 +2305,9 @@ def dpnp_not_equal(x1, x2, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2217,7 +2349,7 @@ def dpnp_positive(x, out=None, order="K"): Output array to populate. Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Output array, if parameter `out` is `None`. + Output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2259,9 +2391,9 @@ def dpnp_power(x1, x2, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2293,9 +2425,9 @@ def dpnp_proj(x, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2334,9 +2466,9 @@ def dpnp_real(x, out=None, order="K"): Second input array, also expected to have a real-valued data type. out ({None, usm_ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2375,9 +2507,9 @@ def dpnp_remainder(x1, x2, out=None, order="K"): Each element must be greater than or equal to 0. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. 
+ Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2414,10 +2546,10 @@ def dpnp_right_shift(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2444,6 +2576,41 @@ def dpnp_round(x, out=None, order="K"): return dpnp_array._create_from_usm_ndarray(res_usm) +_rsqrt_docstring = """ +rsqrt(x, out=None, order="K") + +Computes the reciprocal square-root for each element `x_i` for input array `x`. + +Args: + x (dpnp.ndarray): + Input array, expected to have a real floating-point data type. + out ({None, dpnp.ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. + Default: "K". +Returns: + dpnp.ndarray: + An array containing the element-wise reciprocal square-root. + The data type of the returned array is determined by + the Type Promotion Rules. +""" + +rsqrt_func = _make_unary_func("rsqrt", dpt.rsqrt, _rsqrt_docstring) + + +def dpnp_rsqrt(x, out=None, order="K"): + """Invokes rsqrt() from dpctl.tensor implementation for rsqrt() function.""" + + # dpctl.tensor only works with usm_ndarray + x1_usm = dpnp.get_usm_ndarray(x) + out_usm = None if out is None else dpnp.get_usm_ndarray(out) + + res_usm = rsqrt_func(x1_usm, out=out_usm, order=order) + return dpnp_array._create_from_usm_ndarray(res_usm) + + _sign_docstring = """ sign(x, out=None, order="K") @@ -2458,9 +2625,9 @@ def dpnp_round(x, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2497,9 +2664,9 @@ def dpnp_sign(x, out=None, order="K"): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. - order ("C","F","A","K", optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2530,10 +2697,10 @@ def dpnp_signbit(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. 
- order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2570,10 +2737,10 @@ def dpnp_sin(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2610,10 +2777,10 @@ def dpnp_sinh(x, out=None, order="K"): x (dpnp.ndarray): Input array. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2649,10 +2816,10 @@ def dpnp_sqrt(x, out=None, order="K"): x (dpnp.ndarray): Input array. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2692,9 +2859,9 @@ def dpnp_square(x, out=None, order="K"): Second input array, also expected to have numeric data type. out ({None, dpnp.ndarray}, optional): Output array to populate. - Array have the correct shape and the expected data type. + Array must have the correct shape and the expected data type. order ("C","F","A","K", None, optional): - Memory layout of the newly output array, if parameter `out` is `None`. + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Returns: dpnp.ndarray: @@ -2749,10 +2916,10 @@ def dpnp_subtract(x1, x2, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2789,10 +2956,10 @@ def dpnp_tan(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have numeric data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. 
- order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: @@ -2832,10 +2999,10 @@ def dpnp_tanh(x, out=None, order="K"): x (dpnp.ndarray): Input array, expected to have a real-valued data type. out ({None, dpnp.ndarray}, optional): - Output array to populate. Array must have the correct - shape and the expected data type. - order ("C","F","A","K", optional): memory layout of the new - output array, if parameter `out` is `None`. + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C", "F", "A", "K", optional): + Memory layout of the newly output array, if parameter `out` is ``None``. Default: "K". Return: dpnp.ndarray: diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index d9e0155ca118..cacab84510bc 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -55,6 +55,7 @@ dpnp_add, dpnp_ceil, dpnp_conj, + dpnp_copysign, dpnp_divide, dpnp_floor, dpnp_floor_divide, @@ -432,13 +433,36 @@ def convolve(a, v, mode="full"): def copysign( - x1, x2, /, out=None, *, where=True, dtype=None, subok=True, **kwargs + x1, + x2, + /, + out=None, + *, + where=True, + order="K", + dtype=None, + subok=True, + **kwargs, ): """ Change the sign of `x1` to that of `x2`, element-wise. For full documentation refer to :obj:`numpy.copysign`. + Parameters + ---------- + x1 : {dpnp.ndarray, usm_ndarray} + First input array, expected to have a real floating-point data type. + x2 : {dpnp.ndarray, usm_ndarray} + Second input array, also expected to have a real floating-point data + type. + out : ({None, dpnp.ndarray, usm_ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order : ({'C', 'F', 'A', 'K'}, optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". + Returns ------- out : dpnp.ndarray @@ -451,7 +475,12 @@ def copysign( Parameters `where`, `dtype` and `subok` are supported with their default values. Keyword argument `kwargs` is currently unsupported. Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. + Input array data types are limited by supported real data types. + + See Also + -------- + :obj:`dpnp.negative` : Return the numerical negative of each element of `x`. + :obj:`dpnp.positive` : Return the numerical positive of each element of `x`. 
Examples -------- @@ -471,60 +500,17 @@ def copysign( """ - if kwargs: - pass - elif where is not True: - pass - elif dtype is not None: - pass - elif subok is not True: - pass - elif dpnp.isscalar(x1) and dpnp.isscalar(x2): - # at least either x1 or x2 has to be an array - pass - else: - # get USM type and queue to copy scalar from the host memory into a USM allocation - usm_type, queue = ( - get_usm_allocations([x1, x2]) - if dpnp.isscalar(x1) or dpnp.isscalar(x2) - else (None, None) - ) - - x1_desc = dpnp.get_dpnp_descriptor( - x1, - copy_when_strides=False, - copy_when_nondefault_queue=False, - alloc_usm_type=usm_type, - alloc_queue=queue, - ) - x2_desc = dpnp.get_dpnp_descriptor( - x2, - copy_when_strides=False, - copy_when_nondefault_queue=False, - alloc_usm_type=usm_type, - alloc_queue=queue, - ) - if x1_desc and x2_desc: - if out is not None: - if not dpnp.is_supported_array_type(out): - raise TypeError( - "return array must be of supported array type" - ) - out_desc = ( - dpnp.get_dpnp_descriptor( - out, copy_when_nondefault_queue=False - ) - or None - ) - else: - out_desc = None - - return dpnp_copysign( - x1_desc, x2_desc, dtype=dtype, out=out_desc, where=where - ).get_pyobj() - - return call_origin( - numpy.copysign, x1, x2, dtype=dtype, out=out, where=where, **kwargs + return check_nd_call_func( + numpy.copysign, + dpnp_copysign, + x1, + x2, + out=out, + where=where, + order=order, + dtype=dtype, + subok=subok, + **kwargs, ) diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index 608639f0030e..5b6447831dfa 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -55,9 +55,11 @@ dpnp_atan, dpnp_atan2, dpnp_atanh, + dpnp_cbrt, dpnp_cos, dpnp_cosh, dpnp_exp, + dpnp_exp2, dpnp_expm1, dpnp_hypot, dpnp_log, @@ -65,6 +67,7 @@ dpnp_log2, dpnp_log10, dpnp_logaddexp, + dpnp_rsqrt, dpnp_sin, dpnp_sinh, dpnp_sqrt, @@ -98,6 +101,7 @@ "rad2deg", "radians", "reciprocal", + "rsqrt", "sin", "sinh", "sqrt", @@ -532,34 +536,70 @@ def arctanh( ) -def cbrt(x1): +def cbrt( + x, + /, + out=None, + *, + order="K", + where=True, + dtype=None, + subok=True, + **kwargs, +): """ Return the cube-root of an array, element-wise. For full documentation refer to :obj:`numpy.cbrt`. + Parameters + ---------- + x : {dpnp.ndarray, usm_ndarray} + Input array, expected to have a real-valued data type. + out : ({None, dpnp.ndarray, usm_ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order : ({'C', 'F', 'A', 'K'}, optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". + + Returns + ------- + out : dpnp.ndarray + The cube-root of each element in `x`. + Limitations ----------- - Input array is supported as :class:`dpnp.ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameter `x` is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + Parameters `where`, `dtype` and `subok` are supported with their default values. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by real-valued data types. + + See Also + -------- + :obj:`dpnp.sqrt` : Return the positive square-root of an array, element-wise. 
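As a quick, self-contained check of the reworked ``copysign`` above (illustrative only; it mirrors the new ``TestCopySign`` test added further below and is not part of the patch itself):

    import numpy
    import dpnp

    a = dpnp.arange(4, dtype=dpnp.float32)
    out = dpnp.empty(4, dtype=dpnp.float32)

    # the sign of -a is transferred onto a and written into the preallocated out array
    result = dpnp.copysign(a, -a, out=out)
    expected = numpy.copysign(numpy.arange(4, dtype=numpy.float32),
                              -numpy.arange(4, dtype=numpy.float32))
    numpy.testing.assert_allclose(result.asnumpy(), expected)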
Examples -------- >>> import dpnp as np >>> x = np.array([1, 8, 27]) - >>> out = np.cbrt(x) - >>> [i for i in out] - [1.0, 2.0, 3.0] + >>> np.cbrt(x) + array([1., 2., 3.]) """ - x1_desc = dpnp.get_dpnp_descriptor( - x1, copy_when_strides=False, copy_when_nondefault_queue=False + return check_nd_call_func( + numpy.cbrt, + dpnp_cbrt, + x, + out=out, + where=where, + order=order, + dtype=dtype, + subok=subok, + **kwargs, ) - if x1_desc: - return dpnp_cbrt(x1_desc).get_pyobj() - - return call_origin(numpy.cbrt, x1, **kwargs) def cos( @@ -787,39 +827,72 @@ def exp( ) -def exp2(x1): +def exp2( + x, + /, + out=None, + *, + order="K", + where=True, + dtype=None, + subok=True, + **kwargs, +): """ Calculate `2**p` for all `p` in the input array. For full documentation refer to :obj:`numpy.exp2`. + Parameters + ---------- + x : {dpnp.ndarray, usm_ndarray} + Input array, expected to have a floating-point data type. + out : ({None, dpnp.ndarray, usm_ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order : ({'C', 'F', 'A', 'K'}, optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". + + Returns + ------- + out : dpnp.ndarray + Element-wise 2 to the power `x`. + Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. + Parameter `x` is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + Parameters `where`, `dtype` and `subok` are supported with their default values. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. See Also -------- - :obj:`dpnp.power` : First array elements raised to powers from - second array, element-wise. + :obj:`dpnp.exp` : Calculate exponential for all elements in the array. + :obj:`dpnp.expm1` : ``exp(x) - 1``, the inverse of :obj:`dpnp.log1p`. + :obj:`dpnp.power` : First array elements raised to powers from second array, element-wise. Examples -------- >>> import dpnp as np >>> x = np.arange(3.) - >>> out = np.exp2(x) - >>> [i for i in out] - [1.0, 2.0, 4.0] + >>> np.exp2(x) + array([1., 2., 4.]) """ - x1_desc = dpnp.get_dpnp_descriptor( - x1, copy_when_strides=False, copy_when_nondefault_queue=False + return check_nd_call_func( + numpy.exp2, + dpnp_exp2, + x, + out=out, + where=where, + order=order, + dtype=dtype, + subok=subok, + **kwargs, ) - if x1_desc: - return dpnp_exp2(x1_desc).get_pyobj() - - return call_origin(numpy.exp2, x1) def expm1( @@ -1290,6 +1363,71 @@ def reciprocal(x1, **kwargs): return call_origin(numpy.reciprocal, x1, **kwargs) +def rsqrt( + x, + /, + out=None, + *, + order="K", + where=True, + dtype=None, + subok=True, + **kwargs, +): + """ + Computes the reciprocal square-root for each element `x_i` for input array `x`. + + Parameters + ---------- + x : {dpnp.ndarray, usm_ndarray} + Input array, expected to have a real floating-point data type. + out : ({None, dpnp.ndarray, usm_ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order : ({'C', 'F', 'A', 'K'}, optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K" + + Returns + ------- + out : dpnp.ndarray + The reciprocal square-root, element-wise. + + Limitations + ----------- + Parameter `x` is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. 
+ Parameters `where`, `dtype` and `subok` are supported with their default values. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by real-valued data types. + + See Also + -------- + :obj:`dpnp.sqrt` : Return the positive square-root of an array, element-wise. + :obj:`dpnp.reciprocal` : Return the reciprocal of an array, element-wise. + + Examples + -------- + >>> import dpnp as np + >>> x = np.array([1, 8, 27]) + >>> np.rsqrt(x) + array([1. , 0.35355338, 0.19245009]) + + """ + + return check_nd_call_func( + None, + dpnp_rsqrt, + x, + out=out, + where=where, + order=order, + dtype=dtype, + subok=subok, + **kwargs, + ) + + def rad2deg(x1): """ Convert angles from radians to degrees. @@ -1492,6 +1630,11 @@ def sqrt( Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.cbrt` : Return the cube-root of an array, element-wise. + :obj:`dpnp.rsqrt` : Return the reciprocal square-root of an array, element-wise. + Examples -------- >>> import dpnp as np diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 05acfa51b740..d32f1ee78c0c 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -73,9 +73,7 @@ tests/test_linalg.py::test_norm1[None-3-[7]] tests/test_linalg.py::test_norm1[None-3-[1, 2]] tests/test_linalg.py::test_norm1[None-3-[1, 0]] -tests/test_strides.py::test_strides_1arg[(10,)-None-cbrt] tests/test_strides.py::test_strides_1arg[(10,)-None-degrees] -tests/test_strides.py::test_strides_1arg[(10,)-None-exp2] tests/test_strides.py::test_strides_1arg[(10,)-None-fabs] tests/test_strides.py::test_strides_1arg[(10,)-None-radians] tests/test_strides.py::test_strides_erf[(10,)-None] @@ -409,7 +407,6 @@ tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticRaisesWithNu tests/third_party/cupy/math_tests/test_explog.py::TestExplog::test_logaddexp2 tests/third_party/cupy/math_tests/test_explog.py::TestExplog::test_logaddexp2_infinities -tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_copysign_float tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_frexp tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_ldexp tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_nextafter_combination @@ -446,7 +443,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip1 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip2 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip3 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip2 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_cbrt tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs_negative tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_nan_to_num_scalar_nan diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index 77c23e454175..c5cf53b2a71c 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -504,7 +504,6 @@ tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticRaisesWithNu tests/third_party/cupy/math_tests/test_explog.py::TestExplog::test_logaddexp2 tests/third_party/cupy/math_tests/test_explog.py::TestExplog::test_logaddexp2_infinities 
-tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_copysign_float tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_frexp tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_ldexp tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_nextafter_combination @@ -541,7 +540,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip1 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip2 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip3 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip2 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_cbrt tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs_negative tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_nan_to_num_scalar_nan diff --git a/tests/skipped_tests_gpu_no_fp64.tbl b/tests/skipped_tests_gpu_no_fp64.tbl index 8d197a8d28cf..0e043ee7452b 100644 --- a/tests/skipped_tests_gpu_no_fp64.tbl +++ b/tests/skipped_tests_gpu_no_fp64.tbl @@ -15,26 +15,18 @@ tests/test_mathematical.py::TestGradient::test_gradient_y1_dx[3.5-array0] tests/test_mathematical.py::TestGradient::test_gradient_y1_dx[3.5-array1] tests/test_mathematical.py::TestGradient::test_gradient_y1_dx[3.5-array2] -tests/test_strides.py::test_strides_1arg[(10,)-int32-cbrt] tests/test_strides.py::test_strides_1arg[(10,)-int32-degrees] -tests/test_strides.py::test_strides_1arg[(10,)-int32-exp2] tests/test_strides.py::test_strides_1arg[(10,)-int32-fabs] tests/test_strides.py::test_strides_1arg[(10,)-int32-radians] -tests/test_strides.py::test_strides_1arg[(10,)-int64-cbrt] tests/test_strides.py::test_strides_1arg[(10,)-int64-degrees] -tests/test_strides.py::test_strides_1arg[(10,)-int64-exp2] tests/test_strides.py::test_strides_1arg[(10,)-int64-fabs] tests/test_strides.py::test_strides_1arg[(10,)-int64-radians] -tests/test_strides.py::test_strides_1arg[(10,)-None-cbrt] tests/test_strides.py::test_strides_1arg[(10,)-None-degrees] -tests/test_strides.py::test_strides_1arg[(10,)-None-exp2] tests/test_strides.py::test_strides_1arg[(10,)-None-fabs] tests/test_strides.py::test_strides_1arg[(10,)-None-radians] tests/test_umath.py::test_umaths[('floor_divide', 'ff')] -tests/test_umath.py::TestSqrt::test_sqrt_complex[complex64] - tests/third_party/cupy/linalg_tests/test_eigenvalue.py::TestEigenvalue_param_0_{UPLO='U'}::test_eigh_batched tests/third_party/cupy/linalg_tests/test_eigenvalue.py::TestEigenvalue_param_1_{UPLO='L'}::test_eigh_batched diff --git a/tests/test_strides.py b/tests/test_strides.py index 098ff53f1e0b..071c92a8a35c 100644 --- a/tests/test_strides.py +++ b/tests/test_strides.py @@ -98,6 +98,20 @@ def test_strides_1arg(func_name, dtype, shape): assert_allclose(result, expected, rtol=1e-06) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) +def test_strides_rsqrt(dtype): + a = numpy.arange(1, 11, dtype=dtype) + b = a[::2] + + dpa = dpnp.arange(1, 11, dtype=dtype) + dpb = dpa[::2] + + result = dpnp.rsqrt(dpb) + expected = 1 / numpy.sqrt(b) + + assert_allclose(result, expected, rtol=1e-06) + + @pytest.mark.parametrize( "func_name", [ diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 43d89a27cb8e..6de86cfd915f 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -340,6 +340,7 @@ def test_meshgrid(device_x, device_y): pytest.param("arctanh", 
[-0.5, 0.0, 0.5]), pytest.param("argmax", [1.0, 2.0, 4.0, 7.0]), pytest.param("argmin", [1.0, 2.0, 4.0, 7.0]), + pytest.param("cbrt", [1.0, 8.0, 27.0]), pytest.param("ceil", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), pytest.param("conjugate", [[1.0 + 1.0j, 0.0], [0.0, 1.0 + 1.0j]]), pytest.param("copy", [1.0, 2.0, 3.0]), @@ -353,6 +354,7 @@ def test_meshgrid(device_x, device_y): pytest.param("diff", [1.0, 2.0, 4.0, 7.0, 0.0]), pytest.param("ediff1d", [1.0, 2.0, 4.0, 7.0, 0.0]), pytest.param("exp", [1.0, 2.0, 4.0, 7.0]), + pytest.param("exp2", [0.0, 1.0, 2.0]), pytest.param("expm1", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("fabs", [-1.2, 1.2]), pytest.param("floor", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), @@ -443,6 +445,23 @@ def test_proj(device): assert_sycl_queue_equal(result_queue, expected_queue) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_rsqrt(device): + X = [1.0, 8.0, 27.0] + x = dpnp.array(X, device=device) + result = dpnp.rsqrt(x) + expected = 1 / numpy.sqrt(x.asnumpy()) + assert_allclose(result, expected) + + expected_queue = x.get_array().sycl_queue + result_queue = result.get_array().sycl_queue + assert_sycl_queue_equal(result_queue, expected_queue) + + @pytest.mark.parametrize( "func,data1,data2", [ @@ -452,15 +471,9 @@ def test_proj(device): [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], ), pytest.param( - "allclose", - [1.0, dpnp.inf, -dpnp.inf], - [1.0, dpnp.inf, -dpnp.inf], - ), - pytest.param( - "arctan2", - [[-1, +1, +1, -1]], - [[-1, -1, +1, +1]], + "allclose", [1.0, dpnp.inf, -dpnp.inf], [1.0, dpnp.inf, -dpnp.inf] ), + pytest.param("arctan2", [[-1, +1, +1, -1]], [[-1, -1, +1, +1]]), pytest.param("copysign", [0.0, 1.0, 2.0], [-1.0, 0.0, 1.0]), pytest.param("cross", [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]), pytest.param( @@ -482,15 +495,9 @@ def test_proj(device): [2.0, 2.0, 2.0, 2.0, 2.0, 2.0], ), pytest.param( - "hypot", - [[1.0, 2.0, 3.0, 4.0]], - [[-1.0, -2.0, -4.0, -5.0]], - ), - pytest.param( - "logaddexp", - [[-1, 2, 5, 9]], - [[4, -3, 2, -8]], + "hypot", [[1.0, 2.0, 3.0, 4.0]], [[-1.0, -2.0, -4.0, -5.0]] ), + pytest.param("logaddexp", [[-1, 2, 5, 9]], [[4, -3, 2, -8]]), pytest.param( "matmul", [[1.0, 0.0], [0.0, 1.0]], [[4.0, 1.0], [1.0, 2.0]] ), diff --git a/tests/test_umath.py b/tests/test_umath.py index 2b0db66ec0d7..35955c935bcb 100644 --- a/tests/test_umath.py +++ b/tests/test_umath.py @@ -577,6 +577,167 @@ def test_invalid_shape(self, shape, dtype): dpnp.exp(dp_array, out=dp_out) +class TestExp2: + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_complex=True) + ) + def test_exp2(self, dtype): + np_array = numpy.arange(7, dtype=dtype) + np_out = numpy.empty(7, dtype=numpy.float64) + + # DPNP + dp_out_dtype = dpnp.float32 + if has_support_aspect64() and dtype != dpnp.float32: + dp_out_dtype = dpnp.float64 + + dp_array = dpnp.array(np_array, dtype=dp_out_dtype) + dp_out = dpnp.array(np_out, dtype=dp_out_dtype) + result = dpnp.exp2(dp_array, out=dp_out) + + # original + expected = numpy.exp2(np_array, out=np_out) + + tol = dpnp.finfo(dtype=result.dtype).resolution + assert_allclose(expected, result.asnumpy(), rtol=tol) + + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_exp2_complex(self, dtype): + x1 = numpy.linspace(0, 8, num=10) + x2 = numpy.linspace(0, 6, num=10) + Xnp = x1 + 1j * x2 + np_array = numpy.asarray(Xnp, dtype=dtype) + np_out = numpy.empty(10, dtype=numpy.complex128) + + # DPNP + dp_out_dtype = dpnp.complex64 + if 
has_support_aspect64() and dtype != dpnp.complex64: + dp_out_dtype = dpnp.complex128 + + dp_array = dpnp.array(np_array, dtype=dp_out_dtype) + dp_out = dpnp.array(np_out, dtype=dp_out_dtype) + result = dpnp.exp2(dp_array, out=dp_out) + + # original + expected = numpy.exp2(np_array, out=np_out) + + tol = dpnp.finfo(dtype=result.dtype).resolution + assert_allclose(expected, result.asnumpy(), rtol=tol) + + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_complex=True, no_none=True)[:-1] + ) + def test_invalid_dtype(self, dtype): + dpnp_dtype = get_all_dtypes(no_complex=True, no_none=True)[-1] + dp_array = dpnp.arange(10, dtype=dpnp_dtype) + dp_out = dpnp.empty(10, dtype=dtype) + + with pytest.raises(TypeError): + dpnp.exp2(dp_array, out=dp_out) + + @pytest.mark.parametrize("dtype", get_float_dtypes()) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) + def test_invalid_shape(self, shape, dtype): + dp_array = dpnp.arange(10, dtype=dtype) + dp_out = dpnp.empty(shape, dtype=dtype) + + with pytest.raises(ValueError): + dpnp.exp2(dp_array, out=dp_out) + + +class TestCbrt: + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_complex=True) + ) + def test_cbrt(self, dtype): + np_array = numpy.arange(7, dtype=dtype) + np_out = numpy.empty(7, dtype=numpy.float64) + + # DPNP + dp_out_dtype = dpnp.float32 + if has_support_aspect64() and dtype != dpnp.float32: + dp_out_dtype = dpnp.float64 + + dp_array = dpnp.array(np_array, dtype=dp_out_dtype) + dp_out = dpnp.array(np_out, dtype=dp_out_dtype) + result = dpnp.cbrt(dp_array, out=dp_out) + + # original + expected = numpy.cbrt(np_array, out=np_out) + + tol = dpnp.finfo(dtype=result.dtype).resolution + assert_allclose(expected, result.asnumpy(), rtol=tol) + + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_complex=True, no_none=True)[:-1] + ) + def test_invalid_dtype(self, dtype): + dpnp_dtype = get_all_dtypes(no_complex=True, no_none=True)[-1] + dp_array = dpnp.arange(10, dtype=dpnp_dtype) + dp_out = dpnp.empty(10, dtype=dtype) + + with pytest.raises(TypeError): + dpnp.cbrt(dp_array, out=dp_out) + + @pytest.mark.parametrize("dtype", get_float_dtypes()) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) + def test_invalid_shape(self, shape, dtype): + dp_array = dpnp.arange(10, dtype=dtype) + dp_out = dpnp.empty(shape, dtype=dtype) + + with pytest.raises(ValueError): + dpnp.cbrt(dp_array, out=dp_out) + + +class TestRsqrt: + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_complex=True) + ) + def test_rsqrt(self, dtype): + np_array = numpy.arange(1, 10, dtype=dtype) + np_out = numpy.empty(9, dtype=numpy.float64) + + # DPNP + dp_out_dtype = dpnp.float32 + if has_support_aspect64() and dtype != dpnp.float32: + dp_out_dtype = dpnp.float64 + + dp_array = dpnp.array(np_array, dtype=dp_out_dtype) + dp_out = dpnp.array(np_out, dtype=dp_out_dtype) + result = dpnp.rsqrt(dp_array, out=dp_out) + + # original + expected = numpy.reciprocal(numpy.sqrt(np_array), out=np_out) + + tol = dpnp.finfo(dtype=result.dtype).resolution + assert_allclose(expected, result.asnumpy(), rtol=tol) + + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_complex=True, no_none=True)[:-1] + ) + def test_invalid_dtype(self, dtype): + dpnp_dtype = get_all_dtypes(no_complex=True, no_none=True)[-1] + dp_array = dpnp.arange(10, dtype=dpnp_dtype) + dp_out = dpnp.empty(10, dtype=dtype) + + with pytest.raises(TypeError): + dpnp.rsqrt(dp_array, out=dp_out) + 
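Since ``rsqrt`` has no direct NumPy counterpart, ``TestRsqrt`` above validates it against ``1 / numpy.sqrt(...)``. The same identity can be checked in isolation (an illustrative sketch, not part of the patch; the tolerance mirrors the new ``test_strides_rsqrt``):

    import numpy
    import dpnp

    a = numpy.array([1.0, 4.0, 16.0], dtype=numpy.float32)
    result = dpnp.rsqrt(dpnp.array(a))
    expected = 1 / numpy.sqrt(a)  # same reference as used in the tests
    numpy.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-6)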
+ @pytest.mark.parametrize("dtype", get_float_dtypes()) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) + def test_invalid_shape(self, shape, dtype): + dp_array = dpnp.arange(10, dtype=dtype) + dp_out = dpnp.empty(shape, dtype=dtype) + + with pytest.raises(ValueError): + dpnp.rsqrt(dp_array, out=dp_out) + + class TestArccos: @pytest.mark.parametrize("dtype", get_float_dtypes()) @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") @@ -917,6 +1078,55 @@ def test_invalid_shape(self, shape, dtype): dpnp.arctan2(dp_array, dp_array, out=dp_out) +class TestCopySign: + @pytest.mark.parametrize("dtype", get_float_dtypes()) + def test_copysign(self, dtype): + array_data = numpy.arange(10) + out = numpy.empty(10, dtype=dtype) + + # DPNP + dp_array = dpnp.array(array_data, dtype=dtype) + dp_out = dpnp.array(out, dtype=dtype) + result = dpnp.copysign(dp_array, -dp_array, out=dp_out) + + # original + np_array = numpy.array(array_data, dtype=dtype) + expected = numpy.copysign(np_array, -np_array, out=out) + + assert_allclose(expected, result) + + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_complex=True, no_none=True) + ) + def test_out_dtypes(self, dtype): + if has_support_aspect64() and dtype != numpy.float32: + dtype_out = numpy.float64 + else: + dtype_out = numpy.float32 + size = 2 if dtype == dpnp.bool else 10 + + np_array = numpy.arange(size, dtype=dtype) + np_out = numpy.empty(size, dtype=dtype_out) + expected = numpy.copysign(np_array, -np_array, out=np_out) + + dp_array = dpnp.arange(size, dtype=dtype) + dp_out = dpnp.empty(size, dtype=dtype_out) + result = dpnp.copysign(dp_array, -dp_array, out=dp_out) + + assert_allclose(expected, result) + + @pytest.mark.parametrize("dtype", get_float_dtypes()) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) + def test_invalid_shape(self, shape, dtype): + dp_array = dpnp.arange(10, dtype=dtype) + dp_out = dpnp.empty(shape, dtype=dtype) + + with pytest.raises(ValueError): + dpnp.copysign(dp_array, dp_array, out=dp_out) + + class TestSqrt: @pytest.mark.parametrize( "dtype", get_all_dtypes(no_bool=True, no_complex=True) diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 7d31cb7f25aa..bc0919643bd3 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -374,6 +374,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("arctanh", [-0.5, 0.0, 0.5]), pytest.param("argmax", [1.0, 2.0, 4.0, 7.0]), pytest.param("argmin", [1.0, 2.0, 4.0, 7.0]), + pytest.param("cbrt", [1, 8, 27]), pytest.param("ceil", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), pytest.param("conjugate", [[1.0 + 1.0j, 0.0], [0.0, 1.0 + 1.0j]]), pytest.param( @@ -382,6 +383,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("cosh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("count_nonzero", [0, 1, 7, 0]), pytest.param("exp", [1.0, 2.0, 4.0, 7.0]), + pytest.param("exp2", [0.0, 1.0, 2.0]), pytest.param("expm1", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("floor", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), pytest.param( @@ -402,6 +404,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param( "real", [complex(1.0, 2.0), complex(3.0, 4.0), complex(5.0, 6.0)] ), + pytest.param("rsqrt", [1, 8, 27]), pytest.param("sign", [-5.0, 0.0, 4.5]), pytest.param("signbit", [-5.0, 0.0, 4.5]), pytest.param( @@ -432,46 +435,21 @@ def test_1in_1out(func, data, usm_type): [[1.2, -0.0], [-7, 2.34567]], [[1.2, 0.0], [-7, 2.34567]], ), - pytest.param( - 
"arctan2", - [[-1, +1, +1, -1]], - [[-1, -1, +1, +1]], - ), + pytest.param("arctan2", [[-1, +1, +1, -1]], [[-1, -1, +1, +1]]), + pytest.param("copysign", [0.0, 1.0, 2.0], [-1.0, 0.0, 1.0]), pytest.param( "dot", [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]], [[4.0, 4.0], [4.0, 4.0], [4.0, 4.0]], ), + pytest.param("fmax", [[0.0, 1.0, 2.0]], [[3.0, 4.0, 5.0]]), + pytest.param("fmin", [[0.0, 1.0, 2.0]], [[3.0, 4.0, 5.0]]), pytest.param( - "fmax", - [[0.0, 1.0, 2.0]], - [[3.0, 4.0, 5.0]], - ), - pytest.param( - "fmin", - [[0.0, 1.0, 2.0]], - [[3.0, 4.0, 5.0]], - ), - pytest.param( - "hypot", - [[1.0, 2.0, 3.0, 4.0]], - [[-1.0, -2.0, -4.0, -5.0]], - ), - pytest.param( - "logaddexp", - [[-1, 2, 5, 9]], - [[4, -3, 2, -8]], - ), - pytest.param( - "maximum", - [[0.0, 1.0, 2.0]], - [[3.0, 4.0, 5.0]], - ), - pytest.param( - "minimum", - [[0.0, 1.0, 2.0]], - [[3.0, 4.0, 5.0]], + "hypot", [[1.0, 2.0, 3.0, 4.0]], [[-1.0, -2.0, -4.0, -5.0]] ), + pytest.param("logaddexp", [[-1, 2, 5, 9]], [[4, -3, 2, -8]]), + pytest.param("maximum", [[0.0, 1.0, 2.0]], [[3.0, 4.0, 5.0]]), + pytest.param("minimum", [[0.0, 1.0, 2.0]], [[3.0, 4.0, 5.0]]), ], ) @pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) diff --git a/tests/third_party/cupy/math_tests/test_misc.py b/tests/third_party/cupy/math_tests/test_misc.py index f8e0a32f6426..c05432e36427 100644 --- a/tests/third_party/cupy/math_tests/test_misc.py +++ b/tests/third_party/cupy/math_tests/test_misc.py @@ -2,12 +2,13 @@ import pytest import dpnp as cupy +from tests.helper import has_support_aspect64 from tests.third_party.cupy import testing class TestMisc: @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(atol=1e-5, type_check=False) + @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def check_unary(self, name, xp, dtype, no_bool=False): if no_bool and numpy.dtype(dtype).char == "?": return numpy.int_(0) @@ -164,7 +165,7 @@ def test_sqrt(self): self.check_unary("sqrt") @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(atol=1e-5) + @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def test_cbrt(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.cbrt(a) From 912bb776e776bd0e5b7fa6f80af349037ddd63d6 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Tue, 28 Nov 2023 15:57:15 +0100 Subject: [PATCH 23/38] Update deploy step of a job with building docs (#1631) * Update deploy step of a job with building docs * Added global env variables for GitHub events --- .github/workflows/build-sphinx.yml | 40 +++++++++++++++++++----------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build-sphinx.yml b/.github/workflows/build-sphinx.yml index 90efdc044c9e..702d4e325cd5 100644 --- a/.github/workflows/build-sphinx.yml +++ b/.github/workflows/build-sphinx.yml @@ -7,6 +7,12 @@ on: types: [opened, synchronize, reopened, closed] env: + GH_BOT_NAME: 'github-actions[bot]' + GH_BOT_EMAIL: 'github-actions[bot]@users.noreply.github.com' + GH_EVENT_OPEN_PR_UPSTREAM: ${{ github.event_name == 'pull_request' && github.event.action != 'closed' && + github.event.pull_request && !github.event.pull_request.head.repo.fork }} + GH_EVENT_PUSH_UPSTREAM: ${{ github.ref == 'refs/heads/master' && github.event_name == 'push' && + github.event.ref == 'refs/heads/master' && github.event.repository && !github.event.repository.fork }} PUBLISH_DIR: doc/_build/html/ defaults: @@ -125,21 +131,25 @@ jobs: working-directory: 
'dpnp/backend/doc' - name: Copy backend docs - run: cp -r dpnp/backend/doc/html doc/_build/html/backend_doc + run: cp -r dpnp/backend/doc/html ${{ env.PUBLISH_DIR }}/backend_doc # https://github.com/marketplace/actions/github-pages-action + # The step is only used to build docs while pushing a PR to "master" - name: Deploy docs - if: | - github.event.pull_request && !github.event.pull_request.head.repo.fork && - (github.ref == 'refs/heads/master' || (startsWith(github.ref, 'refs/heads/release') == true) || github.event_name == 'push' && contains(github.ref, 'refs/tags/')) + if: env.GH_EVENT_PUSH_UPSTREAM uses: peaceiris/actions-gh-pages@v3.9.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ${{ env.PUBLISH_DIR }} + keep_files: true + commit_message: ${{ github.event.head_commit.message }} + publish_branch: gh-pages + user_name: ${{ env.GH_BOT_NAME }} + user_email: ${{ env.GH_BOT_EMAIL }} + # The step is only used to build docs while pushing to PR branch - name: Publish pull-request docs - if: | - github.event.pull_request && !github.event.pull_request.head.repo.fork && github.event.action != 'closed' + if: env.GH_EVENT_OPEN_PR_UPSTREAM uses: peaceiris/actions-gh-pages@v3.9.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -147,14 +157,14 @@ jobs: destination_dir: ./pull/${{ github.event.number }} allow_empty_commit : true keep_files: true - commit_message: ${{ github.event.head_commit.message }} + commit_message: ${{ github.event.pull_request.title }} publish_branch: gh-pages - user_name: 'github-actions[bot]' - user_email: 'github-actions[bot]@users.noreply.github.com' + user_name: ${{ env.GH_BOT_NAME }} + user_email: ${{ env.GH_BOT_EMAIL }} + # The step is only used to build docs while pushing to PR branch - name: Comment with URL to published pull-request docs - if: | - github.event.pull_request && !github.event.pull_request.head.repo.fork && github.event.action != 'closed' + if: env.GH_EVENT_OPEN_PR_UPSTREAM env: PR_NUM: ${{ github.event.number }} uses: mshick/add-pr-comment@v2.8.1 @@ -163,9 +173,11 @@ jobs: View rendered docs @ https://intelpython.github.io/dpnp/pull/${{ env.PR_NUM }}/index.html allow-repeats: false + # The job is only used to build docs when PR is closed (action from PR branch) clean: if: | - github.event.pull_request && !github.event.pull_request.head.repo.fork && github.event.action == 'closed' + github.event_name == 'pull_request' && github.event.action == 'closed' && + github.event.pull_request && !github.event.pull_request.head.repo.fork needs: build-and-deploy @@ -185,8 +197,8 @@ jobs: git checkout --track tokened_docs/gh-pages echo `pwd` [ -d pull/${PR_NUM} ] && git rm -rf pull/${PR_NUM} - git config --global user.name 'github-actions[bot]' - git config --global user.email 'github-actions[bot]@users.noreply.github.com' + git config --global user.name ${{ env.GH_BOT_NAME }} + git config --global user.email ${{ env.GH_BOT_EMAIL }} git commit -m "Removing docs for closed pull request ${PR_NUM}" git push tokened_docs gh-pages From ad90f66b35e56113061ee0134ab7f02bf1fa5120 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Wed, 29 Nov 2023 14:40:36 -0600 Subject: [PATCH 24/38] implement dpnp.mean (#1632) * implement dpnp.mean * address comments --- .github/workflows/conda-package.yml | 2 + dpnp/dpnp_array.py | 12 +++-- dpnp/dpnp_iface_statistics.py | 73 +++++++---------------------- tests/test_mathematical.py | 39 --------------- tests/test_statistics.py | 69 ++++++++++++++++++++++++++- tests/test_sycl_queue.py 
| 1 + tests/test_usm_type.py | 1 + 7 files changed, 98 insertions(+), 99 deletions(-) diff --git a/.github/workflows/conda-package.yml b/.github/workflows/conda-package.yml index 853776867d8d..8c1c6bfe4352 100644 --- a/.github/workflows/conda-package.yml +++ b/.github/workflows/conda-package.yml @@ -25,6 +25,7 @@ env: test_random_state.py test_sort.py test_special.py + test_statistics.py test_sycl_queue.py test_umath.py test_usm_type.py @@ -47,6 +48,7 @@ env: third_party/cupy/math_tests/test_trigonometric.py third_party/cupy/sorting_tests/test_sort.py third_party/cupy/sorting_tests/test_count.py + third_party/cupy/statistics_tests/test_meanvar.py VER_JSON_NAME: 'version.json' VER_SCRIPT1: "import json; f = open('version.json', 'r'); j = json.load(f); f.close(); " VER_SCRIPT2: "d = j['dpnp'][0]; print('='.join((d[s] for s in ('version', 'build'))))" diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index 88db5d695f9d..c1fbbc1d124d 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -915,10 +915,16 @@ def max( return dpnp.max(self, axis, out, keepdims, initial, where) - def mean(self, axis=None, **kwargs): - """Returns the average of the array elements.""" + def mean( + self, axis=None, dtype=None, out=None, keepdims=False, *, where=True + ): + """ + Returns the average of the array elements. + + Refer to :obj:`dpnp.mean` for full documentation. + """ - return dpnp.mean(self, axis=axis, **kwargs) + return dpnp.mean(self, axis, dtype, out, keepdims, where=where) def min( self, diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index cc11aeede1a5..38b2d88eef07 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -40,7 +40,6 @@ import dpctl.tensor as dpt import numpy -from numpy.core.numeric import normalize_axis_tuple import dpnp from dpnp.dpnp_algo import * @@ -417,7 +416,7 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): return dpnp.get_result_array(result, out) -def mean(x, /, *, axis=None, dtype=None, keepdims=False, out=None, where=True): +def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): """ Compute the arithmetic mean along the specified axis. @@ -425,16 +424,16 @@ def mean(x, /, *, axis=None, dtype=None, keepdims=False, out=None, where=True): Returns ------- - y : dpnp.ndarray + out : dpnp.ndarray an array containing the mean values of the elements along the specified axis(axes). - If the input array is empty, an array containing a single NaN value is returned. + If the input is a zero-size array, an array containing NaN values is returned. Limitations ----------- - Parameters `x` is supported as either :class:`dpnp.ndarray` + Parameters `a` is supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `keepdims`, `out` and `where` are supported with their default values. - Otherwise the function will be executed sequentially on CPU. + Parameter `where` is supported only with their default values. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. See Also @@ -459,59 +458,21 @@ def mean(x, /, *, axis=None, dtype=None, keepdims=False, out=None, where=True): array([2., 3.]) >>> np.mean(a, axis=1) array([1.5, 3.5]) + """ - if keepdims is not False: - pass - elif out is not None: - pass - elif where is not True: - pass + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported by its default value." 
+ ) else: - if dtype is None and dpnp.issubdtype(x.dtype, dpnp.inexact): - dtype = x.dtype - - if axis is None: - if x.size == 0: - return dpnp.array(dpnp.nan, dtype=dtype) - else: - result = dpnp.sum(x, dtype=dtype) / x.size - return result.astype(dtype) if result.dtype != dtype else result - - if not isinstance(axis, (tuple, list)): - axis = (axis,) - - axis = normalize_axis_tuple(axis, x.ndim, "axis") - res_sum = dpnp.sum(x, axis=axis, dtype=dtype) - - del_ = 1.0 - for axis_value in axis: - del_ *= x.shape[axis_value] - - # performing an inplace operation on arrays of bool or integer types - # is not possible due to incompatible data types because - # it returns a floating value - if dpnp.issubdtype(res_sum.dtype, dpnp.inexact): - res_sum /= del_ - else: - new_res_sum = res_sum / del_ - return ( - new_res_sum.astype(dtype) - if new_res_sum.dtype != dtype - else new_res_sum - ) - - return res_sum.astype(dtype) if res_sum.dtype != dtype else res_sum + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.mean(dpt_array, axis=axis, keepdims=keepdims) + ) + result = result.astype(dtype) if dtype is not None else result - return call_origin( - numpy.mean, - x, - axis=axis, - dtype=dtype, - out=out, - keepdims=keepdims, - where=where, - ) + return dpnp.get_result_array(result, out) def median(x1, axis=None, out=None, overwrite_input=False, keepdims=False): diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index b0c8caff3ad9..9c8850aad188 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1993,45 +1993,6 @@ def test_sum(shape, dtype_in, dtype_out, transpose, keepdims, order): assert_array_equal(numpy_res, dpnp_res.asnumpy()) -class TestMean: - @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_mean_axis_tuple(self, dtype): - dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) - np_array = dpnp.asnumpy(dp_array) - - result = dpnp.mean(dp_array, axis=(0, 1)) - expected = numpy.mean(np_array, axis=(0, 1)) - assert_allclose(expected, result) - - def test_mean_axis_zero_size(self): - dp_array = dpnp.array([], dtype="int64") - np_array = dpnp.asnumpy(dp_array) - - result = dpnp.mean(dp_array) - expected = numpy.mean(np_array) - assert_allclose(expected, result) - - def test_mean_strided(self): - dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype="f4") - np_array = dpnp.asnumpy(dp_array) - - result = dpnp.mean(dp_array[::-1]) - expected = numpy.mean(np_array[::-1]) - assert_allclose(expected, result) - - result = dpnp.mean(dp_array[::2]) - expected = numpy.mean(np_array[::2]) - assert_allclose(expected, result) - - def test_mean_scalar(self): - dp_array = dpnp.array(5) - np_array = dpnp.asnumpy(dp_array) - - result = dp_array.mean() - expected = np_array.mean() - assert_allclose(expected, result) - - @pytest.mark.parametrize( "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) ) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index fdfea361e6f5..50a9ae5aa36d 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -8,7 +8,7 @@ import dpnp -from .helper import get_all_dtypes +from .helper import assert_dtype_allclose, get_all_dtypes @pytest.mark.parametrize( @@ -88,6 +88,73 @@ def test_max_min_NotImplemented(func): getattr(dpnp, func)(ia, initial=6) +class TestMean: + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_mean_axis_tuple(self, dtype): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + 
+ result = dpnp.mean(dp_array, axis=(0, 1)) + expected = numpy.mean(np_array, axis=(0, 1)) + assert_allclose(expected, result) + + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + def test_mean_out(self, dtype, axis): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.mean(np_array, axis=axis) + result = dpnp.empty_like(dpnp.asarray(expected)) + dpnp.mean(dp_array, axis=axis, out=result) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_mean_dtype(self, dtype): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype="i4") + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.mean(np_array, dtype=dtype) + result = dpnp.mean(dp_array, dtype=dtype) + assert_allclose(expected, result) + + @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") + @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) + def test_mean_empty(self, axis, shape): + dp_array = dpnp.empty(shape, dtype=dpnp.int64) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.mean(dp_array, axis=axis) + expected = numpy.mean(np_array, axis=axis) + assert_allclose(expected, result) + + def test_mean_strided(self): + dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype="f4") + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.mean(dp_array[::-1]) + expected = numpy.mean(np_array[::-1]) + assert_allclose(expected, result) + + result = dpnp.mean(dp_array[::2]) + expected = numpy.mean(np_array[::2]) + assert_allclose(expected, result) + + def test_mean_scalar(self): + dp_array = dpnp.array(5) + np_array = dpnp.asnumpy(dp_array) + + result = dp_array.mean() + expected = np_array.mean() + assert_allclose(expected, result) + + def test_mean_NotImplemented(func): + ia = dpnp.arange(5) + with pytest.raises(NotImplementedError): + dpnp.mean(ia, where=False) + + @pytest.mark.usefixtures("allow_fall_back_on_numpy") @pytest.mark.parametrize( "array", diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 6de86cfd915f..265d412b2ac1 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -367,6 +367,7 @@ def test_meshgrid(device_x, device_y): pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), pytest.param("max", [1.0, 2.0, 4.0, 7.0]), + pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), pytest.param("nancumprod", [1.0, dpnp.nan]), pytest.param("nancumsum", [1.0, dpnp.nan]), diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index bc0919643bd3..18af427e423f 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -395,6 +395,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), pytest.param("nanprod", [1.0, 2.0, dp.nan]), pytest.param("max", [1.0, 2.0, 4.0, 7.0]), + pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), From 2c8cbb56bf5ee357049cae68a7d5c52916f4d91d Mon Sep 17 00:00:00 2001 From: vlad-perevezentsev Date: Tue, 5 Dec 2023 21:07:50 +0100 Subject: [PATCH 25/38] Add type checking in `assert_dtype_allclose` for inexact dtypes (#1634) * Add dtype checking for inexact dtype in assert_dtype_allclose * Update test_out_dtypes in TestDivide * Add a check for support of 16 bit types * Add an empty line after the 
description * fix condition when numpy`s array is not float16 * Address the remarks * Update test_sum_float in test_sum.py * Add a new check_only_type_kind param to assert_dtype_allclose * Update test_sum and test_fft * Use check_only_type_kind in test_fft_rfft --- tests/helper.py | 50 ++++++++++++++++++++++++++++++++++---- tests/test_fft.py | 8 +++--- tests/test_mathematical.py | 5 +++- tests/test_sum.py | 8 +++++- tests/test_sycl_queue.py | 2 +- 5 files changed, 61 insertions(+), 12 deletions(-) diff --git a/tests/helper.py b/tests/helper.py index b3d816e769ac..243c61504a50 100644 --- a/tests/helper.py +++ b/tests/helper.py @@ -7,14 +7,31 @@ import dpnp -def assert_dtype_allclose(dpnp_arr, numpy_arr, check_type=True): +def assert_dtype_allclose( + dpnp_arr, numpy_arr, check_type=True, check_only_type_kind=False +): """ Assert DPNP and NumPy array based on maximum dtype resolution of input arrays for floating and complex types. For other dtypes the assertion is based on exact matching of the arrays. - - """ - + When 'check_type' is True (default), the function asserts: + - Equal dtypes for exact types. + For inexact types: + - If the numpy array's dtype is `numpy.float16`, checks if the device + of the `dpnp_arr` supports 64-bit precision floating point operations. + If supported, asserts equal dtypes. + Otherwise, asserts equal type kinds. + - For other inexact types, asserts equal dtypes if the device of the `dpnp_arr` + supports 64-bit precision floating point operations or if the numpy array's inexact + dtype is not a double precision type. + Otherwise, asserts equal type kinds. + The 'check_only_type_kind' parameter (False by default) asserts only equal type kinds + for all data types supported by DPNP when set to True. + It is effective only when 'check_type' is also set to True. 
+ + """ + + list_64bit_types = [numpy.float64, numpy.complex128] is_inexact = lambda x: dpnp.issubdtype(x.dtype, dpnp.inexact) if is_inexact(dpnp_arr) or is_inexact(numpy_arr): tol = 8 * max( @@ -22,10 +39,33 @@ def assert_dtype_allclose(dpnp_arr, numpy_arr, check_type=True): numpy.finfo(numpy_arr.dtype).resolution, ) assert_allclose(dpnp_arr.asnumpy(), numpy_arr, atol=tol, rtol=tol) + if check_type: + numpy_arr_dtype = numpy_arr.dtype + dpnp_arr_dtype = dpnp_arr.dtype + dpnp_arr_dev = dpnp_arr.sycl_device + + if check_only_type_kind: + assert dpnp_arr_dtype.kind == numpy_arr_dtype.kind + else: + is_np_arr_f2 = numpy_arr_dtype == numpy.float16 + + if is_np_arr_f2: + if has_support_aspect16(dpnp_arr_dev): + assert dpnp_arr_dtype == numpy_arr_dtype + elif ( + numpy_arr_dtype not in list_64bit_types + or has_support_aspect64(dpnp_arr_dev) + ): + assert dpnp_arr_dtype == numpy_arr_dtype + else: + assert dpnp_arr_dtype.kind == numpy_arr_dtype.kind else: assert_array_equal(dpnp_arr.asnumpy(), numpy_arr) if check_type: - assert dpnp_arr.dtype == numpy_arr.dtype + if check_only_type_kind: + assert dpnp_arr.dtype.kind == numpy_arr.dtype.kind + else: + assert dpnp_arr.dtype == numpy_arr.dtype def get_complex_dtypes(device=None): diff --git a/tests/test_fft.py b/tests/test_fft.py index 0d2ea664b58d..b439ef38cce6 100644 --- a/tests/test_fft.py +++ b/tests/test_fft.py @@ -16,7 +16,7 @@ def test_fft(dtype, norm): np_res = numpy.fft.fft(data, norm=norm) dpnp_res = dpnp.fft.fft(dpnp_data, norm=norm) - assert_dtype_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res, check_only_type_kind=True) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @@ -29,7 +29,7 @@ def test_fft_ndim(dtype, shape, norm): np_res = numpy.fft.fft(np_data, norm=norm) dpnp_res = dpnp.fft.fft(dpnp_data, norm=norm) - assert_dtype_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res, check_only_type_kind=True) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @@ -44,7 +44,7 @@ def test_fft_ifft(dtype, shape, norm): np_res = numpy.fft.ifft(np_data, norm=norm) dpnp_res = dpnp.fft.ifft(dpnp_data, norm=norm) - assert_dtype_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res, check_only_type_kind=True) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) @@ -58,7 +58,7 @@ def test_fft_rfft(dtype, shape): np_res = numpy.fft.rfft(np_data) dpnp_res = dpnp.fft.rfft(dpnp_data) - assert_dtype_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res, check_only_type_kind=True) @pytest.mark.parametrize( diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 9c8850aad188..4f751b697fef 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1105,6 +1105,7 @@ def test_out_dtypes(self, dtype): dp_array2 = dpnp.arange(size, dtype=dtype) dp_out = dpnp.empty(size, dtype=dpnp.complex64) + check_dtype = True if dtype != dpnp.complex64: # dtype of out mismatches types of input arrays with pytest.raises(TypeError): @@ -1112,9 +1113,11 @@ def test_out_dtypes(self, dtype): # allocate new out with expected type dp_out = dpnp.empty(size, dtype=dtype) + # Set check_dtype to False as dtype does not match + check_dtype = False result = dpnp.divide(dp_array1, dp_array2, out=dp_out) - assert_dtype_allclose(result, expected) + assert_dtype_allclose(result, expected, check_type=check_dtype) @pytest.mark.usefixtures("suppress_divide_invalid_numpy_warnings") @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) 
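A minimal usage sketch of the updated ``assert_dtype_allclose`` helper above (illustrative only; it assumes the repository's ``tests`` package is importable from the working directory):

    import numpy
    import dpnp
    from tests.helper import assert_dtype_allclose

    a = numpy.linspace(0.0, 1.0, 5, dtype=numpy.float32)
    result = dpnp.sqrt(dpnp.array(a))
    expected = numpy.sqrt(a)

    # exact dtype comparison: both results are float32 here
    assert_dtype_allclose(result, expected)
    # relaxed comparison of the dtype kind only, as used for the FFT tests above
    assert_dtype_allclose(result, expected, check_only_type_kind=True)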
diff --git a/tests/test_sum.py b/tests/test_sum.py index 16b17847f270..4104b33a6248 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -27,10 +27,16 @@ def test_sum_float(dtype): ) ia = dpnp.array(a) + # Flag for type check in special cases + # Use only type kinds checks when dpnp handles float32 arrays + # as `dpnp.sum()` and `numpy.sum()` return different dtypes + check_type_kind = dtype == dpnp.float32 for axis in range(len(a)): result = dpnp.sum(ia, axis=axis) expected = numpy.sum(a, axis=axis) - assert_dtype_allclose(result, expected) + assert_dtype_allclose( + result, expected, check_only_type_kind=check_type_kind + ) def test_sum_int(): diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 265d412b2ac1..3618a9bb4c54 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -875,7 +875,7 @@ def test_fft_rfft(type, shape, device): np_res = numpy.fft.rfft(np_data) dpnp_res = dpnp.fft.rfft(dpnp_data) - assert_dtype_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res, check_only_type_kind=True) expected_queue = dpnp_data.get_array().sycl_queue result_queue = dpnp_res.get_array().sycl_queue From a7add8e5d57d62a9c73c34a305d8d4a626c9357c Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Wed, 6 Dec 2023 18:47:22 +0100 Subject: [PATCH 26/38] Redesign `dpnp.put_along_axis` and `dpnp.take_along_axis` thorough existing calls (#1636) * Redesigned `put_along_axis` and `take_along_axis` thorugh existing calls * Simplified check for * Move check of array type in dpnp.prod after the TODO comment --- dpnp/backend/include/dpnp_iface_fptr.hpp | 28 +- dpnp/backend/kernels/dpnp_krnl_indexing.cpp | 22 -- dpnp/dpnp_algo/dpnp_algo.pxd | 2 - dpnp/dpnp_algo/dpnp_algo_indexing.pxi | 129 --------- dpnp/dpnp_iface.py | 37 +++ dpnp/dpnp_iface_indexing.py | 215 +++++++++++---- dpnp/dpnp_iface_manipulation.py | 10 +- dpnp/dpnp_iface_mathematical.py | 20 +- dpnp/linalg/dpnp_iface_linalg.py | 7 +- tests/helper.py | 10 +- tests/test_indexing.py | 246 +++++++++++++----- tests/test_sycl_queue.py | 11 +- tests/test_usm_type.py | 5 +- .../cupy/indexing_tests/test_indexing.py | 3 - 14 files changed, 424 insertions(+), 321 deletions(-) diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index c56f38ffcb5e..6a174b3b647e 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -231,21 +231,19 @@ enum class DPNPFuncName : size_t DPNP_FN_PTP, /**< Used in numpy.ptp() impl */ DPNP_FN_PUT, /**< Used in numpy.put() impl */ DPNP_FN_PUT_ALONG_AXIS, /**< Used in numpy.put_along_axis() impl */ - DPNP_FN_PUT_ALONG_AXIS_EXT, /**< Used in numpy.put_along_axis() impl, - requires extra parameters */ - DPNP_FN_QR, /**< Used in numpy.linalg.qr() impl */ - DPNP_FN_QR_EXT, /**< Used in numpy.linalg.qr() impl, requires extra - parameters */ - DPNP_FN_RADIANS, /**< Used in numpy.radians() impl */ - DPNP_FN_RADIANS_EXT, /**< Used in numpy.radians() impl, requires extra - parameters */ - DPNP_FN_REMAINDER, /**< Used in numpy.remainder() impl */ - DPNP_FN_RECIP, /**< Used in numpy.recip() impl */ - DPNP_FN_RECIP_EXT, /**< Used in numpy.recip() impl, requires extra - parameters */ - DPNP_FN_REPEAT, /**< Used in numpy.repeat() impl */ - DPNP_FN_RIGHT_SHIFT, /**< Used in numpy.right_shift() impl */ - DPNP_FN_RNG_BETA, /**< Used in numpy.random.beta() impl */ + DPNP_FN_QR, /**< Used in numpy.linalg.qr() impl */ + DPNP_FN_QR_EXT, /**< Used in numpy.linalg.qr() impl, 
requires extra + parameters */ + DPNP_FN_RADIANS, /**< Used in numpy.radians() impl */ + DPNP_FN_RADIANS_EXT, /**< Used in numpy.radians() impl, requires extra + parameters */ + DPNP_FN_REMAINDER, /**< Used in numpy.remainder() impl */ + DPNP_FN_RECIP, /**< Used in numpy.recip() impl */ + DPNP_FN_RECIP_EXT, /**< Used in numpy.recip() impl, requires extra + parameters */ + DPNP_FN_REPEAT, /**< Used in numpy.repeat() impl */ + DPNP_FN_RIGHT_SHIFT, /**< Used in numpy.right_shift() impl */ + DPNP_FN_RNG_BETA, /**< Used in numpy.random.beta() impl */ DPNP_FN_RNG_BETA_EXT, /**< Used in numpy.random.beta() impl, requires extra parameters */ DPNP_FN_RNG_BINOMIAL, /**< Used in numpy.random.binomial() impl */ diff --git a/dpnp/backend/kernels/dpnp_krnl_indexing.cpp b/dpnp/backend/kernels/dpnp_krnl_indexing.cpp index 2cca84f9e61f..e9addf36b707 100644 --- a/dpnp/backend/kernels/dpnp_krnl_indexing.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_indexing.cpp @@ -796,19 +796,6 @@ void (*dpnp_put_along_axis_default_c)(void *, size_t) = dpnp_put_along_axis_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_put_along_axis_ext_c)(DPCTLSyclQueueRef, - void *, - long *, - void *, - size_t, - const shape_elem_type *, - size_t, - size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_put_along_axis_c<_DataType>; - template class dpnp_take_c_kernel; @@ -1005,15 +992,6 @@ void func_map_init_indexing_func(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_PUT_ALONG_AXIS][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_put_along_axis_default_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_ALONG_AXIS_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_put_along_axis_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_ALONG_AXIS_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_put_along_axis_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_ALONG_AXIS_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_put_along_axis_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_ALONG_AXIS_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_put_along_axis_ext_c}; - fmap[DPNPFuncName::DPNP_FN_TAKE][eft_BLN][eft_INT] = { eft_BLN, (void *)dpnp_take_default_c}; fmap[DPNPFuncName::DPNP_FN_TAKE][eft_INT][eft_INT] = { diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 80c6035d7a9f..d49adcf0b7fc 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -156,8 +156,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_RNG_POISSON_EXT DPNP_FN_RNG_POWER DPNP_FN_RNG_POWER_EXT - DPNP_FN_PUT_ALONG_AXIS - DPNP_FN_PUT_ALONG_AXIS_EXT DPNP_FN_RNG_RAYLEIGH DPNP_FN_RNG_RAYLEIGH_EXT DPNP_FN_RNG_SHUFFLE diff --git a/dpnp/dpnp_algo/dpnp_algo_indexing.pxi b/dpnp/dpnp_algo/dpnp_algo_indexing.pxi index 36fc7ff8eb91..25cebe84d18b 100644 --- a/dpnp/dpnp_algo/dpnp_algo_indexing.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_indexing.pxi @@ -41,10 +41,8 @@ __all__ += [ "dpnp_diagonal", "dpnp_fill_diagonal", "dpnp_indices", - "dpnp_put_along_axis", "dpnp_putmask", "dpnp_select", - "dpnp_take_along_axis", "dpnp_tril_indices", "dpnp_tril_indices_from", "dpnp_triu_indices", @@ -69,16 +67,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*custom_indexing_2in_1out_func_ptr_t_)(c_dpct ctypedef c_dpctl.DPCTLSyclEventRef(*custom_indexing_2in_func_ptr_t)(c_dpctl.DPCTLSyclQueueRef, void *, void * , shape_elem_type * , const size_t, const c_dpctl.DPCTLEventVectorRef) -ctypedef c_dpctl.DPCTLSyclEventRef(*custom_indexing_3in_with_axis_func_ptr_t)(c_dpctl.DPCTLSyclQueueRef, - void * , - void * , - void * , - const size_t, - shape_elem_type * , - const size_t, - 
const size_t, - const size_t, - const c_dpctl.DPCTLEventVectorRef) cpdef utils.dpnp_descriptor dpnp_choose(utils.dpnp_descriptor x1, list choices1): @@ -283,35 +271,6 @@ cpdef object dpnp_indices(dimensions): return dpnp_result -cpdef dpnp_put_along_axis(dpnp_descriptor arr, dpnp_descriptor indices, dpnp_descriptor values, int axis): - cdef shape_type_c arr_shape = arr.shape - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(arr.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_PUT_ALONG_AXIS_EXT, param1_type, param1_type) - - utils.get_common_usm_allocation(arr, indices) # check USM allocation is common - _, _, result_sycl_queue = utils.get_common_usm_allocation(arr, values) - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef custom_indexing_3in_with_axis_func_ptr_t func = kernel_data.ptr - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - arr.get_data(), - indices.get_data(), - values.get_data(), - axis, - arr_shape.data(), - arr.ndim, - indices.size, - values.size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - cpdef dpnp_putmask(utils.dpnp_descriptor arr, utils.dpnp_descriptor mask, utils.dpnp_descriptor values): cdef int values_size = values.size @@ -341,94 +300,6 @@ cpdef utils.dpnp_descriptor dpnp_select(list condlist, list choicelist, default) return res_array -cpdef object dpnp_take_along_axis(object arr, object indices, int axis): - cdef long size_arr = arr.size - cdef shape_type_c shape_arr = arr.shape - cdef shape_type_c output_shape - cdef long size_indices = indices.size - res_type = arr.dtype - - if axis != arr.ndim - 1: - res_shape_list = list(shape_arr) - res_shape_list[axis] = 1 - res_shape = tuple(res_shape_list) - - output_shape = (0,) * (len(shape_arr) - 1) - ind = 0 - for id, shape_axis in enumerate(shape_arr): - if id != axis: - output_shape[ind] = shape_axis - ind += 1 - - prod = 1 - for i in range(len(output_shape)): - if output_shape[i] != 0: - prod *= output_shape[i] - - result_array = dpnp.empty((prod, ), dtype=res_type) - ind_array = [None] * prod - arr_shape_offsets = [None] * len(shape_arr) - acc = 1 - - for i in range(len(shape_arr)): - ind = len(shape_arr) - 1 - i - arr_shape_offsets[ind] = acc - acc *= shape_arr[ind] - - output_shape_offsets = [None] * len(shape_arr) - acc = 1 - - for i in range(len(output_shape)): - ind = len(output_shape) - 1 - i - output_shape_offsets[ind] = acc - acc *= output_shape[ind] - result_offsets = arr_shape_offsets[:] # need copy. 
not a reference - result_offsets[axis] = 0 - - for source_idx in range(size_arr): - - # reconstruct x,y,z from linear source_idx - xyz = [] - remainder = source_idx - for i in arr_shape_offsets: - quotient, remainder = divmod(remainder, i) - xyz.append(quotient) - - # extract result axis - result_axis = [] - for idx, offset in enumerate(xyz): - if idx != axis: - result_axis.append(offset) - - # Construct result offset - result_offset = 0 - for i, result_axis_val in enumerate(result_axis): - result_offset += (output_shape_offsets[i] * result_axis_val) - - arr_elem = arr.item(source_idx) - if ind_array[result_offset] is None: - ind_array[result_offset] = 0 - else: - ind_array[result_offset] += 1 - - if ind_array[result_offset] % size_indices == indices.item(result_offset % size_indices): - result_array[result_offset] = arr_elem - - dpnp_result_array = dpnp.reshape(result_array, res_shape) - return dpnp_result_array - - else: - result_array = utils_py.create_output_descriptor_py(shape_arr, res_type, None).get_pyobj() - - result_array_flatiter = result_array.flat - - for i in range(size_arr): - ind = size_indices * (i // size_indices) + indices.item(i % size_indices) - result_array_flatiter[i] = arr.item(ind) - - return result_array - - cpdef tuple dpnp_tril_indices(n, k=0, m=None): array1 = [] array2 = [] diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index e91a9b991f89..247264a79c56 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -58,6 +58,7 @@ "array_equal", "asnumpy", "astype", + "check_supported_arrays_type", "convert_single_elem_array_to_scalar", "default_float_type", "dpnp_queue_initialize", @@ -203,6 +204,42 @@ def astype(x1, dtype, order="K", casting="unsafe", copy=True): return dpnp_array._create_from_usm_ndarray(array_obj) +def check_supported_arrays_type(*arrays, scalar_type=False): + """ + Return ``True`` if each array has either type of scalar, + :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + But if any array has unsupported type, ``TypeError`` will be raised. + + Parameters + ---------- + arrays : {dpnp_array, usm_ndarray} + Input arrays to check for supported types. + scalar_type : {bool}, optional + A scalar type is also considered as supported if flag is True. + + Returns + ------- + out : bool + ``True`` if each type of input `arrays` is supported type, + ``False`` otherwise. + + Raises + ------ + TypeError + If any input array from `arrays` is of unsupported array type. + + """ + + for a in arrays: + if scalar_type and dpnp.isscalar(a) or is_supported_array_type(a): + continue + + raise TypeError( + "An array must be any of supported type, but got {}".format(type(a)) + ) + return True + + def convert_single_elem_array_to_scalar(obj, keepdims=False): """Convert array with single element to scalar.""" diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index bc04a47efafd..6a61f728e7d2 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -41,6 +41,7 @@ import dpctl.tensor as dpt import numpy +from numpy.core.numeric import normalize_axis_index import dpnp from dpnp.dpnp_algo import * @@ -70,6 +71,52 @@ ] +def _build_along_axis_index(a, indices, axis): + """ + Build a fancy index used by a family of `_along_axis` functions. + + The fancy index consists of orthogonal arranges, with the + requested index inserted at the right location. + + The resulting index is going to be used inside `dpnp.put_along_axis` + and `dpnp.take_along_axis` implementations. 
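    As an illustrative sketch using only public API (the helper itself is
    internal and the sample array is made up for the example), for a 2-d
    input the fancy index amounts to an orthogonal arange along the
    non-indexed axis combined with the requested indices:

    >>> import dpnp as np
    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
    >>> ind = np.argsort(a, axis=1)
    >>> rows = np.arange(2).reshape(2, 1)  # orthogonal arange for axis 0
    >>> a[rows, ind]
    array([[10, 20, 30],
           [40, 50, 60]])
    >>> np.take_along_axis(a, ind, axis=1)
    array([[10, 20, 30],
           [40, 50, 60]])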
+ + """ + + if not dpnp.issubdtype(indices.dtype, dpnp.integer): + raise IndexError("`indices` must be an integer array") + + # normalize array shape and input axis + if axis is None: + a_shape = (a.size,) + axis = 0 + else: + a_shape = a.shape + axis = normalize_axis_index(axis, a.ndim) + + if len(a_shape) != indices.ndim: + raise ValueError( + "`indices` and `a` must have the same number of dimensions" + ) + + # compute dimensions to iterate over + dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim)) + shape_ones = (1,) * indices.ndim + + # build the index + fancy_index = [] + for dim, n in zip(dest_dims, a_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1 :] + fancy_index.append( + dpnp.arange(n, dtype=indices.dtype).reshape(ind_shape) + ) + + return tuple(fancy_index) + + def choose(x1, choices, out=None, mode="raise"): """ Construct an array from an index array and a set of arrays to choose from. @@ -78,7 +125,8 @@ def choose(x1, choices, out=None, mode="raise"): See also -------- - :obj:`take_along_axis` : Preferable if choices is an array. + :obj:`dpnp.take_along_axis` : Preferable if choices is an array. + """ x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) @@ -247,7 +295,7 @@ def extract(condition, x): Returns ------- - y : dpnp.ndarray + out : dpnp.ndarray Rank 1 array of values from `x` where `condition` is True. Limitations @@ -342,7 +390,7 @@ def nonzero(x, /): Returns ------- - y : tuple[dpnp.ndarray] + out : tuple[dpnp.ndarray] Indices of elements that are non-zero. Limitations @@ -496,39 +544,55 @@ def put(a, indices, vals, /, *, axis=None, mode="wrap"): return call_origin(numpy.put, a, indices, vals, mode, dpnp_inplace=True) -def put_along_axis(x1, indices, values, axis): +def put_along_axis(a, indices, values, axis): """ Put values into the destination array by matching 1d index and data slices. For full documentation refer to :obj:`numpy.put_along_axis`. + Limitations + ----------- + Parameters `a` and `indices` are supported either as :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Parameter `values` is supported either as scalar, :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Otherwise ``TypeError`` exception will be raised. + See Also -------- - :obj:`take_along_axis` : Take values from the input array by matching 1d index and data slices. + :obj:`dpnp.put` : Put values along an axis, using the same indices for every 1d slice. + :obj:`dpnp.take_along_axis` : Take values from the input array by matching 1d index and data slices. 
+ + Examples + -------- + For this sample array + + >>> import dpnp as np + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - indices_desc = dpnp.get_dpnp_descriptor( - indices, copy_when_nondefault_queue=False - ) - values_desc = dpnp.get_dpnp_descriptor( - values, copy_when_nondefault_queue=False - ) - if x1_desc and indices_desc and values_desc: - if x1_desc.ndim != indices_desc.ndim: - pass - elif not isinstance(axis, int): - pass - elif axis >= x1_desc.ndim: - pass - elif indices_desc.size != values_desc.size: - pass - else: - return dpnp_put_along_axis(x1_desc, indices_desc, values_desc, axis) + dpnp.check_supported_arrays_type(a, indices) - return call_origin( - numpy.put_along_axis, x1, indices, values, axis, dpnp_inplace=True - ) + # TODO: remove when #1382(dpctl) is resolved + if dpnp.is_supported_array_type(values) and a.dtype != values.dtype: + values = values.astype(a.dtype) + + if axis is None: + a = a.ravel() + + a[_build_along_axis_index(a, indices, axis)] = values def putmask(x1, mask, values): @@ -596,7 +660,7 @@ def take(x, indices, /, *, axis=None, out=None, mode="wrap"): Returns ------- - dpnp.ndarray + out : dpnp.ndarray An array with shape x.shape[:axis] + indices.shape + x.shape[axis + 1:] filled with elements from `x`. @@ -613,7 +677,7 @@ def take(x, indices, /, *, axis=None, out=None, mode="wrap"): See Also -------- :obj:`dpnp.compress` : Take elements using a boolean mask. - :obj:`take_along_axis` : Take elements by matching the array and the index arrays. + :obj:`dpnp.take_along_axis` : Take elements by matching the array and the index arrays. Notes ----- @@ -666,44 +730,83 @@ def take(x, indices, /, *, axis=None, out=None, mode="wrap"): return call_origin(numpy.take, x, indices, axis, out, mode) -def take_along_axis(x1, indices, axis): +def take_along_axis(a, indices, axis): """ Take values from the input array by matching 1d index and data slices. For full documentation refer to :obj:`numpy.take_along_axis`. + Returns + ------- + out : dpnp.ndarray + The indexed result. + + Limitations + ----------- + Parameters `a` and `indices` are supported either as :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Otherwise ``TypeError`` exception will be raised. + See Also -------- :obj:`dpnp.take` : Take along an axis, using the same indices for every 1d slice. - :obj:`put_along_axis` : Put values into the destination array by matching 1d index and data slices. - """ + :obj:`dpnp.put_along_axis` : Put values into the destination array by matching 1d index and data slices. 
- x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - indices_desc = dpnp.get_dpnp_descriptor( - indices, copy_when_nondefault_queue=False - ) - if x1_desc and indices_desc: - if x1_desc.ndim != indices_desc.ndim: - pass - elif not isinstance(axis, int): - pass - elif axis >= x1_desc.ndim: - pass - elif x1_desc.ndim == indices_desc.ndim: - val_list = [] - for i in list(indices_desc.shape)[:-1]: - if i == 1: - val_list.append(True) - else: - val_list.append(False) - if not all(val_list): - pass - else: - return dpnp_take_along_axis(x1, indices, axis) - else: - return dpnp_take_along_axis(x1, indices, axis) + Examples + -------- + For this sample array - return call_origin(numpy.take_along_axis, x1, indices, axis) + >>> import dpnp as np + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1) + >>> ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you maintain the trivial dimension + with ``keepdims``: + + >>> np.max(a, axis=1, keepdims=True) + array([[30], + [60]]) + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.argmin(a, axis=1, keepdims=True) + >>> ai_max = np.argmax(a, axis=1, keepdims=True) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + + """ + + dpnp.check_supported_arrays_type(a, indices) + + if axis is None: + a = a.ravel() + + return a[_build_along_axis_index(a, indices, axis)] def tril_indices(n, k=0, m=None): diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index b805a3a906e9..7ee53bca3775 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -799,10 +799,7 @@ def fliplr(m): """ - if not dpnp.is_supported_array_type(m): - raise TypeError( - "An array must be any of supported type, but got {}".format(type(m)) - ) + dpnp.check_supported_arrays_type(m) if m.ndim < 2: raise ValueError(f"Input must be >= 2-d, but got {m.ndim}") @@ -857,10 +854,7 @@ def flipud(m): """ - if not dpnp.is_supported_array_type(m): - raise TypeError( - "An array must be any of supported type, but got {}".format(type(m)) - ) + dpnp.check_supported_arrays_type(m) if m.ndim < 1: raise ValueError(f"Input must be >= 1-d, but got {m.ndim}") diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index cacab84510bc..d619b5662b1b 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -1785,15 +1785,12 @@ def nanprod( """ - if dpnp.is_supported_array_or_scalar(a): - if issubclass(a.dtype.type, dpnp.inexact): - mask = dpnp.isnan(a) - a = dpnp.array(a, copy=True) - dpnp.copyto(a, 1, where=mask) - else: - raise TypeError( - "An array must be any of supported type, but got {}".format(type(a)) - ) + dpnp.check_supported_arrays_type(a) + + if issubclass(a.dtype.type, dpnp.inexact): + mask = dpnp.isnan(a) + a = dpnp.array(a, copy=True) + dpnp.copyto(a, 1, where=mask) return dpnp.prod( a, @@ -2108,10 +2105,7 @@ def prod( # Product reduction for complex output are known to fail for Gen9 with 2024.0 compiler 
# TODO: get rid of this temporary work around when OneAPI 2024.1 is released - if not isinstance(a, (dpnp_array, dpt.usm_ndarray)): - raise TypeError( - "An array must be any of supported type, but got {}".format(type(a)) - ) + dpnp.check_supported_arrays_type(a) _dtypes = (a.dtype, dtype) _any_complex = any( dpnp.issubdtype(dt, dpnp.complexfloating) for dt in _dtypes diff --git a/dpnp/linalg/dpnp_iface_linalg.py b/dpnp/linalg/dpnp_iface_linalg.py index b41b96c70525..c7437b30da60 100644 --- a/dpnp/linalg/dpnp_iface_linalg.py +++ b/dpnp/linalg/dpnp_iface_linalg.py @@ -233,14 +233,11 @@ def eigh(a, UPLO="L"): """ + dpnp.check_supported_arrays_type(a) + if UPLO not in ("L", "U"): raise ValueError("UPLO argument must be 'L' or 'U'") - if not dpnp.is_supported_array_type(a): - raise TypeError( - "An array must be any of supported type, but got {}".format(type(a)) - ) - if a.ndim < 2: raise ValueError( "%d-dimensional array given. Array must be " diff --git a/tests/helper.py b/tests/helper.py index 243c61504a50..de4db998a7bf 100644 --- a/tests/helper.py +++ b/tests/helper.py @@ -68,6 +68,14 @@ def assert_dtype_allclose( assert dpnp_arr.dtype == numpy_arr.dtype +def get_integer_dtypes(): + """ + Build a list of integer types supported by DPNP. + """ + + return [dpnp.int32, dpnp.int64] + + def get_complex_dtypes(device=None): """ Build a list of complex types supported by DPNP based on device capabilities. @@ -123,7 +131,7 @@ def get_all_dtypes( dtypes = [dpnp.bool] if not no_bool else [] # add integer types - dtypes.extend([dpnp.int32, dpnp.int64]) + dtypes.extend(get_integer_dtypes()) # add floating types dtypes.extend(get_float_dtypes(no_float16=no_float16, device=dev)) diff --git a/tests/test_indexing.py b/tests/test_indexing.py index 03541dc2d55d..4d8229e53ce7 100644 --- a/tests/test_indexing.py +++ b/tests/test_indexing.py @@ -1,10 +1,32 @@ +import functools + import numpy import pytest -from numpy.testing import assert_, assert_array_equal, assert_equal +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) import dpnp -from .helper import get_all_dtypes +from .helper import get_all_dtypes, get_integer_dtypes + + +def _add_keepdims(func): + """ + Hack in keepdims behavior into a function taking an axis. 
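    A quick sketch of the wrapper's effect (the sample array is made up;
    the shapes assume dpnp.expand_dims follows NumPy semantics):

    >>> a = dpnp.array([[10, 30, 20], [60, 40, 50]])
    >>> _add_keepdims(dpnp.argmax)(a, axis=1).shape
    (2, 1)
    >>> _add_keepdims(dpnp.argmax)(a, axis=None).shape
    (1,)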
+ """ + + @functools.wraps(func) + def wrapped(a, axis, **kwargs): + res = func(a, axis=axis, **kwargs) + if axis is None: + axis = 0 # res is now 0d and we can insert this anywhere + return dpnp.expand_dims(res, axis=axis) + + return wrapped class TestIndexing: @@ -63,6 +85,168 @@ def test_indexing_array_negative_strides(self): assert_array_equal(arr, 10.0) +class TestPutAlongAxis: + @pytest.mark.parametrize( + "arr_dt", get_all_dtypes(no_bool=True, no_none=True) + ) + @pytest.mark.parametrize("axis", list(range(2)) + [None]) + def test_replace_max(self, arr_dt, axis): + a = dpnp.array([[10, 30, 20], [60, 40, 50]], dtype=arr_dt) + + # replace the max with a small value + i_max = _add_keepdims(dpnp.argmax)(a, axis=axis) + dpnp.put_along_axis(a, i_max, -99, axis=axis) + + # find the new minimum, which should max + i_min = _add_keepdims(dpnp.argmin)(a, axis=axis) + assert_array_equal(i_min, i_max) + + @pytest.mark.parametrize( + "arr_dt", get_all_dtypes(no_bool=True, no_none=True) + ) + @pytest.mark.parametrize("idx_dt", get_integer_dtypes()) + @pytest.mark.parametrize("ndim", list(range(1, 4))) + @pytest.mark.parametrize( + "values", + [ + 777, + [100, 200, 300, 400], + (42,), + range(4), + numpy.arange(4), + dpnp.ones(4), + ], + ids=[ + "scalar", + "list", + "tuple", + "range", + "numpy.ndarray", + "dpnp.ndarray", + ], + ) + def test_values(self, arr_dt, idx_dt, ndim, values): + np_a = numpy.arange(4**ndim, dtype=arr_dt).reshape((4,) * ndim) + np_ai = numpy.array([3, 0, 2, 1], dtype=idx_dt).reshape( + (1,) * (ndim - 1) + (4,) + ) + + dp_a = dpnp.array(np_a, dtype=arr_dt) + dp_ai = dpnp.array(np_ai, dtype=idx_dt) + + for axis in range(ndim): + numpy.put_along_axis(np_a, np_ai, values, axis) + dpnp.put_along_axis(dp_a, dp_ai, values, axis) + assert_array_equal(np_a, dp_a) + + @pytest.mark.parametrize("arr_dt", get_all_dtypes()) + @pytest.mark.parametrize("idx_dt", get_integer_dtypes()) + def test_broadcast(self, arr_dt, idx_dt): + np_a = numpy.ones((3, 4, 1), dtype=arr_dt) + np_ai = numpy.arange(10, dtype=idx_dt).reshape((1, 2, 5)) % 4 + + dp_a = dpnp.array(np_a, dtype=arr_dt) + dp_ai = dpnp.array(np_ai, dtype=idx_dt) + + numpy.put_along_axis(np_a, np_ai, 20, axis=1) + dpnp.put_along_axis(dp_a, dp_ai, 20, axis=1) + assert_array_equal(np_a, dp_a) + + +class TestTakeAlongAxis: + # TODO: remove fixture once `dpnp.sort` is fully implemented + @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @pytest.mark.parametrize( + "func, argfunc, kwargs", + [ + pytest.param(dpnp.sort, dpnp.argsort, {}), + pytest.param( + _add_keepdims(dpnp.min), _add_keepdims(dpnp.argmin), {} + ), + pytest.param( + _add_keepdims(dpnp.max), _add_keepdims(dpnp.argmax), {} + ), + # TODO: unmute, once `dpnp.argpartition` is implemented + # pytest.param(dpnp.partition, dpnp.argpartition, {"kth": 2}), + ], + ) + def test_argequivalent(self, func, argfunc, kwargs): + a = dpnp.random.random(size=(3, 4, 5)) + + for axis in list(range(a.ndim)) + [None]: + a_func = func(a, axis=axis, **kwargs) + ai_func = argfunc(a, axis=axis, **kwargs) + assert_array_equal( + a_func, dpnp.take_along_axis(a, ai_func, axis=axis) + ) + + @pytest.mark.parametrize( + "arr_dt", get_all_dtypes(no_bool=True, no_none=True) + ) + @pytest.mark.parametrize("idx_dt", get_integer_dtypes()) + @pytest.mark.parametrize("ndim", list(range(1, 4))) + def test_multi_dimensions(self, arr_dt, idx_dt, ndim): + np_a = numpy.arange(4**ndim, dtype=arr_dt).reshape((4,) * ndim) + np_ai = numpy.array([3, 0, 2, 1], dtype=idx_dt).reshape( + (1,) * (ndim - 1) + (4,) + ) + + 
dp_a = dpnp.array(np_a, dtype=arr_dt) + dp_ai = dpnp.array(np_ai, dtype=idx_dt) + + for axis in range(ndim): + expected = numpy.take_along_axis(np_a, np_ai, axis) + result = dpnp.take_along_axis(dp_a, dp_ai, axis) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("xp", [numpy, dpnp]) + def test_invalid(self, xp): + a = xp.ones((10, 10)) + ai = xp.ones((10, 2), dtype=xp.intp) + + # not enough indices + assert_raises(ValueError, xp.take_along_axis, a, xp.array(1), axis=1) + + # bool arrays not allowed + assert_raises( + IndexError, xp.take_along_axis, a, ai.astype(bool), axis=1 + ) + + # float arrays not allowed + assert_raises( + IndexError, xp.take_along_axis, a, ai.astype(numpy.float32), axis=1 + ) + + # invalid axis + assert_raises(numpy.AxisError, xp.take_along_axis, a, ai, axis=10) + + @pytest.mark.parametrize("arr_dt", get_all_dtypes()) + @pytest.mark.parametrize("idx_dt", get_integer_dtypes()) + def test_empty(self, arr_dt, idx_dt): + np_a = numpy.ones((3, 4, 5), dtype=arr_dt) + np_ai = numpy.ones((3, 0, 5), dtype=idx_dt) + + dp_a = dpnp.array(np_a, dtype=arr_dt) + dp_ai = dpnp.array(np_ai, dtype=idx_dt) + + expected = numpy.take_along_axis(np_a, np_ai, axis=1) + result = dpnp.take_along_axis(dp_a, dp_ai, axis=1) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("arr_dt", get_all_dtypes()) + @pytest.mark.parametrize("idx_dt", get_integer_dtypes()) + def test_broadcast(self, arr_dt, idx_dt): + np_a = numpy.ones((3, 4, 1), dtype=arr_dt) + np_ai = numpy.ones((1, 2, 5), dtype=idx_dt) + + dp_a = dpnp.array(np_a, dtype=arr_dt) + dp_ai = dpnp.array(np_ai, dtype=idx_dt) + + expected = numpy.take_along_axis(np_a, np_ai, axis=1) + result = dpnp.take_along_axis(dp_a, dp_ai, axis=1) + assert_array_equal(expected, result) + + @pytest.mark.usefixtures("allow_fall_back_on_numpy") def test_choose(): a = numpy.r_[:4] @@ -459,42 +643,6 @@ def test_put_invalid_axis(axis): dpnp.put(a, ind, vals, axis=axis) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -def test_put_along_axis_val_int(): - a = numpy.arange(16).reshape(4, 4) - ai = dpnp.array(a) - ind_r = numpy.array([[3, 0, 2, 1]]) - ind_r_i = dpnp.array(ind_r) - for axis in range(2): - numpy.put_along_axis(a, ind_r, 777, axis) - dpnp.put_along_axis(ai, ind_r_i, 777, axis) - assert_array_equal(a, ai) - - -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -def test_put_along_axis1(): - a = numpy.arange(64).reshape(4, 4, 4) - ai = dpnp.array(a) - ind_r = numpy.array([[[3, 0, 2, 1]]]) - ind_r_i = dpnp.array(ind_r) - for axis in range(3): - numpy.put_along_axis(a, ind_r, 777, axis) - dpnp.put_along_axis(ai, ind_r_i, 777, axis) - assert_array_equal(a, ai) - - -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -def test_put_along_axis2(): - a = numpy.arange(64).reshape(4, 4, 4) - ai = dpnp.array(a) - ind_r = numpy.array([[[3, 0, 2, 1]]]) - ind_r_i = dpnp.array(ind_r) - for axis in range(3): - numpy.put_along_axis(a, ind_r, [100, 200, 300, 400], axis) - dpnp.put_along_axis(ai, ind_r_i, [100, 200, 300, 400], axis) - assert_array_equal(a, ai) - - @pytest.mark.parametrize("vals", [[100, 200]], ids=["[100, 200]"]) @pytest.mark.parametrize( "mask", @@ -688,28 +836,6 @@ def test_take_over_index(indices, array_type, mode): assert_array_equal(expected, result) -def test_take_along_axis(): - a = numpy.arange(16).reshape(4, 4) - ai = dpnp.array(a) - ind_r = numpy.array([[3, 0, 2, 1]]) - ind_r_i = dpnp.array(ind_r) - for axis in range(2): - expected = numpy.take_along_axis(a, ind_r, axis) - result = 
dpnp.take_along_axis(ai, ind_r_i, axis) - assert_array_equal(expected, result) - - -def test_take_along_axis1(): - a = numpy.arange(64).reshape(4, 4, 4) - ai = dpnp.array(a) - ind_r = numpy.array([[[3, 0, 2, 1]]]) - ind_r_i = dpnp.array(ind_r) - for axis in range(3): - expected = numpy.take_along_axis(a, ind_r, axis) - result = dpnp.take_along_axis(ai, ind_r_i, axis) - assert_array_equal(expected, result) - - @pytest.mark.parametrize( "m", [None, 0, 1, 2, 3, 4], ids=["None", "0", "1", "2", "3", "4"] ) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 3618a9bb4c54..fc4dbf9f0d6e 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -1299,20 +1299,21 @@ def test_asarray(device_x, device_y): assert_sycl_queue_equal(y.sycl_queue, x.to_device(device_y).sycl_queue) +@pytest.mark.parametrize("func", ["take", "take_along_axis"]) @pytest.mark.parametrize( "device", valid_devices, ids=[device.filter_string for device in valid_devices], ) -def test_take(device): +def test_take(func, device): numpy_data = numpy.arange(5) dpnp_data = dpnp.array(numpy_data, device=device) - ind = [0, 2, 4] - dpnp_ind = dpnp.array(ind, device=device) + dpnp_ind = dpnp.array([0, 2, 4], device=device) + np_ind = dpnp_ind.asnumpy() - result = dpnp.take(dpnp_data, dpnp_ind) - expected = numpy.take(numpy_data, ind) + result = getattr(dpnp, func)(dpnp_data, dpnp_ind, axis=None) + expected = getattr(numpy, func)(numpy_data, np_ind, axis=None) assert_allclose(expected, result) expected_queue = dpnp_data.get_array().sycl_queue diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 18af427e423f..f82e04a2a566 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -472,14 +472,15 @@ def test_broadcast_to(usm_type): assert x.usm_type == y.usm_type +@pytest.mark.parametrize("func", ["take", "take_along_axis"]) @pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) @pytest.mark.parametrize( "usm_type_ind", list_of_usm_types, ids=list_of_usm_types ) -def test_take(usm_type_x, usm_type_ind): +def test_take(func, usm_type_x, usm_type_ind): x = dp.arange(5, usm_type=usm_type_x) ind = dp.array([0, 2, 4], usm_type=usm_type_ind) - z = dp.take(x, ind) + z = getattr(dp, func)(x, ind, axis=None) assert x.usm_type == usm_type_x assert ind.usm_type == usm_type_ind diff --git a/tests/third_party/cupy/indexing_tests/test_indexing.py b/tests/third_party/cupy/indexing_tests/test_indexing.py index 9e323990891c..20890056ae05 100644 --- a/tests/third_party/cupy/indexing_tests/test_indexing.py +++ b/tests/third_party/cupy/indexing_tests/test_indexing.py @@ -7,7 +7,6 @@ from tests.third_party.cupy import testing -@testing.gpu class TestIndexing(unittest.TestCase): @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.numpy_cupy_array_equal() @@ -51,14 +50,12 @@ def test_take_index_range_overflow(self, xp, dtype): b = xp.array([0], dtype=dtype) return a.take(b) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.numpy_cupy_array_equal() def test_take_along_axis(self, xp): a = testing.shaped_random((2, 4, 3), xp, dtype="float32") b = testing.shaped_random((2, 6, 3), xp, dtype="int64", scale=4) return xp.take_along_axis(a, b, axis=-2) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.numpy_cupy_array_equal() def test_take_along_axis_none_axis(self, xp): a = testing.shaped_random((2, 4, 3), xp, dtype="float32") From 9575363306df5a7bc7cc2991c1ca507c0ef4e89e Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> 
Date: Thu, 7 Dec 2023 19:10:21 +0100 Subject: [PATCH 27/38] Redesign `dpnp.diff` thorough existing calls (#1637) * Redesigned `put_along_axis` and `take_along_axis` thorugh existing calls * Redesigned `dpnp.diff` thorugh existing calls * Proper resolving conflicts after rebase of master branch * Increased tests coverage * Fixed test_logspace_axis to be passed in wheels tests * Resolved pre-commit issue --- .github/workflows/conda-package.yml | 9 +- dpnp/dpnp_algo/dpnp_algo_mathematical.pxi | 30 --- dpnp/dpnp_iface_indexing.py | 42 ++-- dpnp/dpnp_iface_mathematical.py | 144 +++++++++--- tests/helper.py | 8 + tests/skipped_tests.tbl | 12 +- tests/skipped_tests_gpu.tbl | 10 +- tests/test_arraycreation.py | 3 +- tests/test_mathematical.py | 205 +++++++++++++++--- tests/test_sycl_queue.py | 27 ++- tests/test_usm_type.py | 1 + .../cupy/math_tests/test_sumprod.py | 11 +- 12 files changed, 368 insertions(+), 134 deletions(-) diff --git a/.github/workflows/conda-package.yml b/.github/workflows/conda-package.yml index 8c1c6bfe4352..1a6650798e92 100644 --- a/.github/workflows/conda-package.yml +++ b/.github/workflows/conda-package.yml @@ -38,14 +38,7 @@ env: third_party/cupy/manipulation_tests/test_join.py third_party/cupy/manipulation_tests/test_rearrange.py third_party/cupy/manipulation_tests/test_transpose.py - third_party/cupy/math_tests/test_arithmetic.py - third_party/cupy/math_tests/test_explog.py - third_party/cupy/math_tests/test_floating.py - third_party/cupy/math_tests/test_hyperbolic.py - third_party/cupy/math_tests/test_matmul.py - third_party/cupy/math_tests/test_misc.py - third_party/cupy/math_tests/test_rounding.py - third_party/cupy/math_tests/test_trigonometric.py + third_party/cupy/math_tests third_party/cupy/sorting_tests/test_sort.py third_party/cupy/sorting_tests/test_count.py third_party/cupy/statistics_tests/test_meanvar.py diff --git a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi index 431892f10217..ce1b0c5f894f 100644 --- a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi @@ -39,7 +39,6 @@ __all__ += [ "dpnp_cross", "dpnp_cumprod", "dpnp_cumsum", - "dpnp_diff", "dpnp_ediff1d", "dpnp_fabs", "dpnp_fmod", @@ -95,35 +94,6 @@ cpdef utils.dpnp_descriptor dpnp_cumsum(utils.dpnp_descriptor x1): return call_fptr_1in_1out(DPNP_FN_CUMSUM_EXT, x1, (x1.size,)) -cpdef utils.dpnp_descriptor dpnp_diff(utils.dpnp_descriptor x1, int n): - cdef utils.dpnp_descriptor res - - x1_obj = x1.get_array() - - if x1.size - n < 1: - res_obj = dpnp_container.empty(0, - dtype=x1.dtype, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - res = utils.dpnp_descriptor(res_obj) - return res - - res_obj = dpnp_container.empty(x1.size - 1, - dtype=x1.dtype, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - res = utils.dpnp_descriptor(res_obj) - for i in range(res.size): - res.get_pyobj()[i] = x1.get_pyobj()[i + 1] - x1.get_pyobj()[i] - - if n == 1: - return res - - return dpnp_diff(res, n - 1) - - cpdef utils.dpnp_descriptor dpnp_ediff1d(utils.dpnp_descriptor x1): if x1.size <= 1: diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index 6a61f728e7d2..ef21c3b9b185 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -550,17 +550,24 @@ def put_along_axis(a, indices, values, axis): For full documentation refer to :obj:`numpy.put_along_axis`. 
- Limitations - ----------- - Parameters `a` and `indices` are supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Parameter `values` is supported either as scalar, :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Otherwise ``TypeError`` exception will be raised. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}, (Ni..., M, Nk...) + Destination array. + indices : {dpnp.ndarray, usm_ndarray}, (Ni..., J, Nk...) + Indices to change along each 1d slice of `a`. This must match the + dimension of input array, but dimensions in ``Ni`` and ``Nj`` + may be 1 to broadcast against `a`. + values : {scalar, array_like}, (Ni..., J, Nk...) + Values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is ``None``, the destination + array is treated as if a flattened 1d view had been created of it. See Also -------- - :obj:`dpnp.put` : Put values along an axis, using the same indices for every 1d slice. + :obj:`dpnp.put` : Put values along an axis, using the same indices for every 1d slice. :obj:`dpnp.take_along_axis` : Take values from the input array by matching 1d index and data slices. Examples @@ -736,17 +743,24 @@ def take_along_axis(a, indices, axis): For full documentation refer to :obj:`numpy.take_along_axis`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}, (Ni..., M, Nk...) + Source array + indices : {dpnp.ndarray, usm_ndarray}, (Ni..., J, Nk...) + Indices to take along each 1d slice of `a`. This must match the + dimension of the input array, but dimensions ``Ni`` and ``Nj`` + only need to broadcast against `a`. + axis : int + The axis to take 1d slices along. If axis is ``None``, the input + array is treated as if it had first been flattened to 1d, + for consistency with `sort` and `argsort`. + Returns ------- out : dpnp.ndarray The indexed result. - Limitations - ----------- - Parameters `a` and `indices` are supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Otherwise ``TypeError`` exception will be raised. - See Also -------- :obj:`dpnp.take` : Take along an axis, using the same indices for every 1d slice. diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index d619b5662b1b..89f4e831dcc2 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -43,7 +43,10 @@ import dpctl.tensor as dpt import dpctl.utils as du import numpy -from numpy.core.numeric import normalize_axis_tuple +from numpy.core.numeric import ( + normalize_axis_index, + normalize_axis_tuple, +) import dpnp from dpnp.dpnp_array import dpnp_array @@ -129,6 +132,31 @@ ] +def _append_to_diff_array(a, axis, combined, values): + """ + Append `values` to `combined` list based on data of array `a`. + + Scalar value (including case with 0d array) is expanded to an array + with length=1 in the direction of axis and the shape of the input array `a` + in along all other axes. + Note, if `values` is a scalar. then it is converted to 0d array allocating + on the same SYCL queue as the input array `a` and with the same USM type. 
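    For instance, a scalar passed as `prepend`/`append` to :obj:`dpnp.diff`
    is broadcast to a length-1 slab along `axis` before concatenation
    (an illustrative sketch, not part of the test suite):

    >>> import dpnp as np
    >>> a = np.arange(4).reshape(2, 2)
    >>> np.diff(a, axis=1, prepend=0)  # the scalar 0 is expanded to shape (2, 1)
    array([[0, 1],
           [2, 1]])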
+ + """ + + dpnp.check_supported_arrays_type(values, scalar_type=True) + if dpnp.isscalar(values): + values = dpnp.asarray( + values, sycl_queue=a.sycl_queue, usm_type=a.usm_type + ) + + if values.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + values = dpnp.broadcast_to(values, tuple(shape)) + combined.append(values) + + def absolute( x, /, @@ -609,6 +637,10 @@ def cumsum(x1, **kwargs): Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.diff` : Calculate the n-th discrete difference along the given axis. + Examples -------- >>> import dpnp as np @@ -630,39 +662,95 @@ def cumsum(x1, **kwargs): return call_origin(numpy.cumsum, x1, **kwargs) -def diff(x1, n=1, axis=-1, prepend=numpy._NoValue, append=numpy._NoValue): +def diff(a, n=1, axis=-1, prepend=None, append=None): """ Calculate the n-th discrete difference along the given axis. For full documentation refer to :obj:`numpy.diff`. - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameters `axis`, `prepend` and `append` are supported only with default values. - Otherwise the function will be executed sequentially on CPU. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : {scalar, dpnp.ndarray, usm_ndarray}, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + out : dpnp.ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. + + See Also + -------- + :obj:`dpnp.gradient` : Return the gradient of an N-dimensional array. + :obj:`dpnp.ediff1d` : Compute the differences between consecutive elements of an array. + :obj:`dpnp.cumsum` : Return the cumulative sum of the elements along a given axis. 
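    Note that for boolean input the differences are computed with
    :obj:`dpnp.not_equal` rather than subtraction, so the result stays
    boolean (illustrative):

    >>> import dpnp as np
    >>> b = np.array([True, True, False, False])
    >>> np.diff(b)
    array([False,  True, False])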
+ + Examples + -------- + >>> import dpnp as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if not isinstance(n, int): - pass - elif n < 1: - pass - elif x1_desc.ndim != 1: - pass - elif axis != -1: - pass - elif prepend is not numpy._NoValue: - pass - elif append is not numpy._NoValue: - pass - else: - return dpnp_diff(x1_desc, n).get_pyobj() + dpnp.check_supported_arrays_type(a) + if n == 0: + return a + if n < 0: + raise ValueError(f"order must be non-negative but got {n}") - return call_origin( - numpy.diff, x1, n=n, axis=axis, prepend=prepend, append=append - ) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not None: + _append_to_diff_array(a, axis, combined, prepend) + + combined.append(a) + if append is not None: + _append_to_diff_array(a, axis, combined, append) + + if len(combined) > 1: + a = dpnp.concatenate(combined, axis=axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = dpnp.not_equal if a.dtype == numpy.bool_ else dpnp.subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + return a def divide( @@ -1276,6 +1364,10 @@ def gradient(x1, *varargs, **kwargs): Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.diff` : Calculate the n-th discrete difference along the given axis. + Examples -------- >>> import dpnp as np diff --git a/tests/helper.py b/tests/helper.py index de4db998a7bf..8fa26116756d 100644 --- a/tests/helper.py +++ b/tests/helper.py @@ -76,6 +76,14 @@ def get_integer_dtypes(): return [dpnp.int32, dpnp.int64] +def get_integer_dtypes(): + """ + Build a list of integer types supported by DPNP. + """ + + return [dpnp.int32, dpnp.int64] + + def get_complex_dtypes(device=None): """ Build a list of complex types supported by DPNP based on device capabilities. 
diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index d32f1ee78c0c..87a29e2cb0c5 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -557,24 +557,22 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape + tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_1dim -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_1dim_with_n -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_without_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_huge_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_numpy_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_out_noncontiguous +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_1dim +tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_2dim_without_axis + tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_numpy_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_numpy_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_numpy_array -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_append -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_n_and_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_prepend + tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_0_{axis=(1, 3), shape=(2, 3, 4, 5)}::test_nansum_axes tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_1_{axis=(1, 3), shape=(20, 30, 40, 50)}::test_nansum_axes tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_2_{axis=(0, 2, 3), shape=(2, 3, 4, 5)}::test_nansum_axes diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index c5cf53b2a71c..b6f6ceb45913 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -654,16 +654,15 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape + tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_1dim 
-tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_1dim_with_n -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_without_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_huge_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_numpy_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_out_noncontiguous tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_1dim tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_2dim_without_axis + tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_0_{axis=0}::test_cumsum_2dim tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum @@ -676,10 +675,7 @@ tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}:: tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}::test_cumsum_numpy_array tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_numpy_array -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_append -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_n_and_axis -tests/third_party/cupy/math_tests/test_sumprod.py::TestDiff::test_diff_2dim_with_prepend + tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_0_{axis=(1, 3), shape=(2, 3, 4, 5)}::test_nansum_axes tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_1_{axis=(1, 3), shape=(20, 30, 40, 50)}::test_nansum_axes tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_2_{axis=(0, 2, 3), shape=(2, 3, 4, 5)}::test_nansum_axes diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 779e62237a08..0a4ce2063379 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -14,6 +14,7 @@ import dpnp from .helper import ( + assert_dtype_allclose, get_all_dtypes, has_support_aspect64, ) @@ -876,4 +877,4 @@ def test_logspace_axis(axis): func = lambda xp: xp.logspace( [2, 3], [20, 15], num=2, base=[[1, 3], [5, 7]], axis=axis ) - assert_allclose(func(dpnp), func(numpy)) + assert_dtype_allclose(func(dpnp), func(numpy)) diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 4f751b697fef..7484a66bfb53 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -5,6 +5,7 @@ import pytest from numpy.testing import ( assert_allclose, + assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_equal, @@ -19,12 +20,183 @@ get_complex_dtypes, get_float_complex_dtypes, get_float_dtypes, + get_integer_dtypes, has_support_aspect64, is_cpu_device, is_win_platform, ) +class TestDiff: + @pytest.mark.parametrize("n", list(range(0, 3))) + @pytest.mark.parametrize("dt", get_integer_dtypes()) + def test_basic_integer(self, n, dt): + x = [1, 4, 6, 7, 12] + np_a = numpy.array(x, dtype=dt) + dpnp_a = dpnp.array(x, dtype=dt) + + expected = numpy.diff(np_a, n=n) + result = dpnp.diff(dpnp_a, n=n) + assert_array_equal(expected, 
result) + + @pytest.mark.parametrize("dt", get_float_dtypes()) + def test_basic_floating(self, dt): + x = [1.1, 2.2, 3.0, -0.2, -0.1] + np_a = numpy.array(x, dtype=dt) + dpnp_a = dpnp.array(x, dtype=dt) + + expected = numpy.diff(np_a) + result = dpnp.diff(dpnp_a) + assert_almost_equal(expected, result) + + @pytest.mark.parametrize("n", [1, 2]) + def test_basic_boolean(self, n): + x = [True, True, False, False] + np_a = numpy.array(x) + dpnp_a = dpnp.array(x) + + expected = numpy.diff(np_a, n=n) + result = dpnp.diff(dpnp_a, n=n) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("dt", get_complex_dtypes()) + def test_basic_complex(self, dt): + x = [1.1 + 1j, 2.2 + 4j, 3.0 + 6j, -0.2 + 7j, -0.1 + 12j] + np_a = numpy.array(x, dtype=dt) + dpnp_a = dpnp.array(x, dtype=dt) + + expected = numpy.diff(np_a) + result = dpnp.diff(dpnp_a) + assert_allclose(expected, result) + + @pytest.mark.parametrize("axis", [None] + list(range(-3, 2))) + def test_axis(self, axis): + np_a = numpy.zeros((10, 20, 30)) + np_a[:, 1::2, :] = 1 + dpnp_a = dpnp.array(np_a) + + kwargs = {} if axis is None else {"axis": axis} + expected = numpy.diff(np_a, **kwargs) + result = dpnp.diff(dpnp_a, **kwargs) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("xp", [numpy, dpnp]) + @pytest.mark.parametrize("axis", [-4, 3]) + def test_axis_error(self, xp, axis): + a = xp.ones((10, 20, 30)) + assert_raises(numpy.AxisError, xp.diff, a, axis=axis) + + @pytest.mark.parametrize("xp", [numpy, dpnp]) + def test_ndim_error(self, xp): + a = xp.array(1.1111111, xp.float32) + assert_raises(ValueError, xp.diff, a) + + @pytest.mark.parametrize("n", [None, 2]) + @pytest.mark.parametrize("axis", [None, 0]) + def test_nd(self, n, axis): + np_a = 20 * numpy.random.rand(10, 20, 30) + dpnp_a = dpnp.array(np_a) + + kwargs = {} if n is None else {"n": n} + if axis is not None: + kwargs.update({"axis": axis}) + + expected = numpy.diff(np_a, **kwargs) + result = dpnp.diff(dpnp_a, **kwargs) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("n", list(range(0, 5))) + def test_n(self, n): + np_a = numpy.array(list(range(3))) + dpnp_a = dpnp.array(np_a) + + expected = numpy.diff(np_a, n=n) + result = dpnp.diff(dpnp_a, n=n) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("xp", [numpy, dpnp]) + def test_n_error(self, xp): + a = xp.array(list(range(3))) + assert_raises(ValueError, xp.diff, a, n=-1) + + @pytest.mark.parametrize("prepend", [0, [0], [-1, 0]]) + def test_prepend(self, prepend): + np_a = numpy.arange(5) + 1 + dpnp_a = dpnp.array(np_a) + + np_p = prepend if numpy.isscalar(prepend) else numpy.array(prepend) + dpnp_p = prepend if dpnp.isscalar(prepend) else dpnp.array(prepend) + + expected = numpy.diff(np_a, prepend=np_p) + result = dpnp.diff(dpnp_a, prepend=dpnp_p) + assert_array_equal(expected, result) + + @pytest.mark.parametrize( + "axis, prepend", + [ + pytest.param(0, 0), + pytest.param(0, [[0, 0]]), + pytest.param(1, 0), + pytest.param(1, [[0], [0]]), + ], + ) + def test_prepend_axis(self, axis, prepend): + np_a = numpy.arange(4).reshape(2, 2) + dpnp_a = dpnp.array(np_a) + + np_p = prepend if numpy.isscalar(prepend) else numpy.array(prepend) + dpnp_p = prepend if dpnp.isscalar(prepend) else dpnp.array(prepend) + + expected = numpy.diff(np_a, axis=axis, prepend=np_p) + result = dpnp.diff(dpnp_a, axis=axis, prepend=dpnp_p) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("append", [0, [0], [0, 2]]) + def test_append(self, append): + np_a = numpy.arange(5) 
+ dpnp_a = dpnp.array(np_a) + + np_ap = append if numpy.isscalar(append) else numpy.array(append) + dpnp_ap = append if dpnp.isscalar(append) else dpnp.array(append) + + expected = numpy.diff(np_a, append=np_ap) + result = dpnp.diff(dpnp_a, append=dpnp_ap) + assert_array_equal(expected, result) + + @pytest.mark.parametrize( + "axis, append", + [ + pytest.param(0, 0), + pytest.param(0, [[0, 0]]), + pytest.param(1, 0), + pytest.param(1, [[0], [0]]), + ], + ) + def test_append_axis(self, axis, append): + np_a = numpy.arange(4).reshape(2, 2) + dpnp_a = dpnp.array(np_a) + + np_ap = append if numpy.isscalar(append) else numpy.array(append) + dpnp_ap = append if dpnp.isscalar(append) else dpnp.array(append) + + expected = numpy.diff(np_a, axis=axis, append=np_ap) + result = dpnp.diff(dpnp_a, axis=axis, append=dpnp_ap) + assert_array_equal(expected, result) + + @pytest.mark.parametrize("xp", [numpy, dpnp]) + def test_prepend_append_error(self, xp): + a = xp.arange(4).reshape(2, 2) + p = xp.zeros((3, 3)) + assert_raises(ValueError, xp.diff, a, prepend=p) + assert_raises(ValueError, xp.diff, a, append=p) + + @pytest.mark.parametrize("xp", [numpy, dpnp]) + def test_prepend_append_axis_error(self, xp): + a = xp.arange(4).reshape(2, 2) + assert_raises(numpy.AxisError, xp.diff, a, axis=3, prepend=0) + assert_raises(numpy.AxisError, xp.diff, a, axis=3, append=0) + + @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestConvolve: def test_object(self): @@ -54,35 +226,6 @@ def test_mode(self): dpnp.convolve(d, k, mode=None) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize( - "array", - [ - [[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [ - [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], - [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], - ], - ], - ids=[ - "[[0, 0], [0, 0]]", - "[[1, 2], [1, 2]]", - "[[1, 2], [3, 4]]", - "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", - "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", - ], -) -def test_diff(array): - np_a = numpy.array(array) - dpnp_a = dpnp.array(array) - expected = numpy.diff(np_a) - result = dpnp.diff(dpnp_a) - assert_allclose(expected, result) - - @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) @pytest.mark.parametrize( @@ -110,10 +253,6 @@ def test_op_multiple_dtypes(dtype1, func, dtype2, data): "rhs", [[[1, 2, 3], [4, 5, 6]], [2.0, 1.5, 1.0], 3, 0.3] ) @pytest.mark.parametrize("lhs", [[[6, 5, 4], [3, 2, 1]], [1.3, 2.6, 3.9]]) -# TODO: achieve the same level of dtype support for all mathematical operations, like -# @pytest.mark.parametrize("dtype", get_all_dtypes()) -# and to get rid of fallbacks on numpy allowed by below fixture -# @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestMathematical: @staticmethod def array_or_scalar(xp, data, dtype=None): diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index fc4dbf9f0d6e..3c658c14fe52 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -1299,6 +1299,32 @@ def test_asarray(device_x, device_y): assert_sycl_queue_equal(y.sycl_queue, x.to_device(device_y).sycl_queue) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +@pytest.mark.parametrize( + "kwargs", + [ + pytest.param({"prepend": 7}), + pytest.param({"append": -2}), + pytest.param({"prepend": -4, "append": 5}), + ], +) +def 
test_diff_scalar_append(device, kwargs): + numpy_data = numpy.arange(7) + dpnp_data = dpnp.array(numpy_data, device=device) + + expected = numpy.diff(numpy_data, **kwargs) + result = dpnp.diff(dpnp_data, **kwargs) + assert_allclose(expected, result) + + expected_queue = dpnp_data.get_array().sycl_queue + result_queue = result.get_array().sycl_queue + assert_sycl_queue_equal(result_queue, expected_queue) + + @pytest.mark.parametrize("func", ["take", "take_along_axis"]) @pytest.mark.parametrize( "device", @@ -1318,5 +1344,4 @@ def test_take(func, device): expected_queue = dpnp_data.get_array().sycl_queue result_queue = result.get_array().sycl_queue - assert_sycl_queue_equal(result_queue, expected_queue) diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index f82e04a2a566..4982ed424140 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -382,6 +382,7 @@ def test_meshgrid(usm_type_x, usm_type_y): ), pytest.param("cosh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("count_nonzero", [0, 1, 7, 0]), + pytest.param("diff", [1.0, 2.0, 4.0, 7.0, 0.0]), pytest.param("exp", [1.0, 2.0, 4.0, 7.0]), pytest.param("exp2", [0.0, 1.0, 2.0]), pytest.param("expm1", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), diff --git a/tests/third_party/cupy/math_tests/test_sumprod.py b/tests/third_party/cupy/math_tests/test_sumprod.py index 5834ac94fe2f..0728382a5b43 100644 --- a/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/tests/third_party/cupy/math_tests/test_sumprod.py @@ -360,10 +360,10 @@ def test_nansum_axes(self, xp, dtype): @testing.parameterize(*testing.product({"axis": axes})) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -@testing.gpu +# TODO: remove "type_check=False" once leveraged on dpctl call class TestCumsum(unittest.TestCase): @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_cumsum(self, xp, dtype): a = testing.shaped_arange((5,), xp, dtype) return xp.cumsum(a) @@ -385,7 +385,7 @@ def test_cumsum_out_noncontiguous(self, xp, dtype): return out @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=False) def test_cumsum_2dim(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) return xp.cumsum(a) @@ -569,8 +569,7 @@ def test_cumprod_numpy_array(self, dtype): return cupy.cumprod(a_numpy) -@testing.gpu -class TestDiff(unittest.TestCase): +class TestDiff: @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_diff_1dim(self, xp, dtype): @@ -617,7 +616,6 @@ def test_diff_2dim_with_append(self, xp, dtype): b = testing.shaped_arange((1, 5), xp, dtype) return xp.diff(a, axis=0, append=b, n=2) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) @@ -625,7 +623,6 @@ def test_diff_2dim_with_scalar_append(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) return xp.diff(a, prepend=1, append=0) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.with_requires("numpy>=1.16") def test_diff_invalid_axis(self): for xp in (numpy, cupy): From d090de902f219b543e67b4a5762cefe9114496be Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Mon, 11 Dec 2023 17:55:59 +0100 Subject: [PATCH 28/38] Add support of missing arguments in `dpnp.amax` and `dpnp.amin` (#1639) * Added support of missing arguments in amax and amin functions * Updated See also links * Updated alias reference 
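With the added arguments the aliases accept the same reduction controls as
dpnp.max and dpnp.min (keepdims, initial and where are simply forwarded),
for example:

>>> import dpnp as np
>>> a = np.array([[2, 7, 1], [4, 0, 9]])
>>> np.amax(a, axis=1, keepdims=True)
array([[7],
       [9]])
>>> np.amin(a, axis=0)
array([2, 0, 1])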
--- dpnp/dpnp_iface_mathematical.py | 16 +++++----- dpnp/dpnp_iface_searching.py | 4 +-- dpnp/dpnp_iface_statistics.py | 53 ++++++++++----------------------- tests/test_amin_amax.py | 26 ++++------------ 4 files changed, 30 insertions(+), 69 deletions(-) diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 89f4e831dcc2..70cc5e9cbac5 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -1477,11 +1477,11 @@ def maximum( -------- :obj:`dpnp.minimum` : Element-wise minimum of two arrays, propagates NaNs. :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. - :obj:`dpnp.amax` : The maximum value of an array along a given axis, propagates NaNs. - :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. - :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. - :obj:`dpnp.amax` : The maximum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.max` : The maximum value of an array along a given axis, propagates NaNs. :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. + :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. + :obj:`dpnp.min` : The minimum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. Examples -------- @@ -1556,11 +1556,11 @@ def minimum( -------- :obj:`dpnp.maximum` : Element-wise maximum of two arrays, propagates NaNs. :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. - :obj:`dpnp.amin` : The minimum value of an array along a given axis, propagates NaNs. - :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. - :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignores NaNs. - :obj:`dpnp.amin` : The minimum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.min` : The minimum value of an array along a given axis, propagates NaNs. :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. + :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignores NaNs. + :obj:`dpnp.max` : The maximum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. Examples -------- diff --git a/dpnp/dpnp_iface_searching.py b/dpnp/dpnp_iface_searching.py index e74c0c1beccf..0210535e1697 100644 --- a/dpnp/dpnp_iface_searching.py +++ b/dpnp/dpnp_iface_searching.py @@ -82,7 +82,7 @@ def argmax(a, axis=None, out=None, *, keepdims=False): -------- :obj:`dpnp.ndarray.argmax` : Equivalent function. :obj:`dpnp.argmin` : Returns the indices of the minimum values along an axis. - :obj:`dpnp.amax` : The maximum value along a given axis. + :obj:`dpnp.max` : The maximum value along a given axis. :obj:`dpnp.unravel_index` : Convert a flat index into an index tuple. :obj:`dpnp.take_along_axis` : Apply ``np.expand_dims(index_array, axis)`` from argmax to an array as if by calling max. @@ -162,7 +162,7 @@ def argmin(a, axis=None, out=None, *, keepdims=False): -------- :obj:`dpnp.ndarray.argmin` : Equivalent function. :obj:`dpnp.argmax` : Returns the indices of the maximum values along an axis. - :obj:`dpnp.amin` : The minimum value along a given axis. + :obj:`dpnp.min` : The minimum value along a given axis. :obj:`dpnp.unravel_index` : Convert a flat index into an index tuple. 
:obj:`dpnp.take_along_axis` : Apply ``np.expand_dims(index_array, axis)`` from argmin to an array as if by calling min. diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 38b2d88eef07..07cad8e4f30c 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -66,63 +66,40 @@ ] -def amax(input, axis=None, out=None): +def amax(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ Return the maximum of an array or maximum along an axis. - For full documentation refer to :obj:`numpy.amax`. + `amax` is an alias of :obj:`dpnp.max`. See Also -------- - :obj:`dpnp.amin` : The minimum value of an array along a given axis, - propagating any NaNs. - :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, - ignoring any NaNs. - :obj:`dpnp.maximum` : Element-wise maximum of two arrays, - propagating any NaNs. - :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignoring any NaNs. - :obj:`dpnp.argmax` : Return the indices of the maximum values. - :obj:`dpnp.nanmin` : Return minimum of an array or minimum along an axis, - ignoring any NaNs. - :obj:`dpnp.minimum` : Element-wise minimum of array elements. - :obj:`dpnp.fmin` : Element-wise minimum of array elements. - - Notes - ----- - This function works exactly the same as :obj:`dpnp.max`. + :obj:`dpnp.max` : alias of this function + :obj:`dpnp.ndarray.max` : equivalent method """ - return max(input, axis=axis, out=out) + + return max( + a, axis=axis, out=out, keepdims=keepdims, initial=initial, where=where + ) -def amin(input, axis=None, out=None): +def amin(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ Return the minimum of an array or minimum along an axis. - For full documentation refer to :obj:`numpy.amin`. + `amin` is an alias of :obj:`dpnp.min`. See Also -------- - :obj:`dpnp.amax` : The maximum value of an array along a given axis, - propagating any NaNs. - :obj:`dpnp.nanmin` : Return minimum of an array or minimum along an axis, - ignoring any NaNs. - :obj:`dpnp.minimum` : Element-wise minimum of array elements. - :obj:`dpnp.fmin` : Element-wise minimum of array elements. - :obj:`dpnp.argmin` : Return the indices of the minimum values. - :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, - ignoring any NaNs. - :obj:`dpnp.maximum` : Element-wise maximum of two arrays, - propagating any NaNs. - :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignoring any NaNs. - - Notes - ----- - This function works exactly the same as :obj:`dpnp.min`. 
+ :obj:`dpnp.min` : alias of this function + :obj:`dpnp.ndarray.min` : equivalent method """ - return min(input, axis=axis, out=out) + return min( + a, axis=axis, out=out, keepdims=keepdims, initial=initial, where=where + ) def average(x1, axis=None, weights=None, returned=False): diff --git a/tests/test_amin_amax.py b/tests/test_amin_amax.py index 5e197f5bf132..1b119ab225b4 100644 --- a/tests/test_amin_amax.py +++ b/tests/test_amin_amax.py @@ -7,8 +7,10 @@ from .helper import get_all_dtypes +@pytest.mark.parametrize("func", ["amax", "amin"]) +@pytest.mark.parametrize("keepdims", [True, False]) @pytest.mark.parametrize("dtype", get_all_dtypes()) -def test_amax(dtype): +def test_amax_amin(func, keepdims, dtype): a = numpy.array( [ [[-2.0, 3.0], [9.1, 0.2]], @@ -20,26 +22,8 @@ def test_amax(dtype): ia = dpnp.array(a) for axis in range(len(a)): - result = dpnp.amax(ia, axis=axis) - expected = numpy.amax(a, axis=axis) - assert_allclose(expected, result) - - -@pytest.mark.parametrize("dtype", get_all_dtypes()) -def test_amin(dtype): - a = numpy.array( - [ - [[-2.0, 3.0], [9.1, 0.2]], - [[-2.0, 5.0], [-2, -1.2]], - [[1.0, -2.0], [5.0, -1.1]], - ], - dtype=dtype, - ) - ia = dpnp.array(a) - - for axis in range(len(a)): - result = dpnp.amin(ia, axis=axis) - expected = numpy.amin(a, axis=axis) + result = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + expected = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) assert_allclose(expected, result) From 4f3b2de93e9411aa86f0ff060dc6e92477046547 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Tue, 12 Dec 2023 14:40:20 +0100 Subject: [PATCH 29/38] Stay away from 2024.0.1 compiler package (#1643) * Stay away from 2024.0.1 compiler package * Limit max compiler version while building docs --- .github/workflows/build-sphinx.yml | 2 +- conda-recipe/meta.yaml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-sphinx.yml b/.github/workflows/build-sphinx.yml index 702d4e325cd5..623644a89a66 100644 --- a/.github/workflows/build-sphinx.yml +++ b/.github/workflows/build-sphinx.yml @@ -97,7 +97,7 @@ jobs: - name: Install dpnp dependencies run: | - conda install numpy"<1.24" dpctl">=0.15.1dev2" mkl-devel-dpcpp onedpl-devel tbb-devel dpcpp_linux-64 \ + conda install numpy"<1.24" dpctl">=0.15.1dev2" mkl-devel-dpcpp onedpl-devel tbb-devel dpcpp_linux-64"<2024.0.1" \ cmake cython pytest ninja scikit-build sysroot_linux-64">=2.28" ${{ env.CHANNELS }} - name: Install cuPy dependencies diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 874f02bbae30..7f0b57db654d 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,4 +1,5 @@ {% set required_compiler_and_mkl_version = "2024.0" %} +{% set max_compiler_version = "2024.0.1" %} {% set required_dpctl_version = "0.15.1dev2" %} package: @@ -24,7 +25,7 @@ requirements: - scikit-build build: - {{ compiler('cxx') }} - - {{ compiler('dpcpp') }} >={{ required_compiler_and_mkl_version }} # [not osx] + - {{ compiler('dpcpp') }} >={{ required_compiler_and_mkl_version }},<{{ max_compiler_version }} # [not osx] - sysroot_linux-64 >=2.28 # [linux] run: - python From 28d334851b8a1beffa1f0f6e9f0ae813d29135b8 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Tue, 12 Dec 2023 16:38:21 +0100 Subject: [PATCH 30/38] Retrying build coverage step on failure (#1640) * Retry coverage step on error * Added timeout to limit single retry time * Added shell option to the step --- 
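Worked bound on the retry budget (illustrative arithmetic, derived from the
values in the workflow change below): with timeout_minutes: 60 and
max_attempts: 5, a persistently failing coverage build is retried for at most
5 × 60 = 300 minutes before the step is marked as failed.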
.github/workflows/generate_coverage.yaml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage.yaml index 9d6b2e511f4b..a9e923957628 100644 --- a/.github/workflows/generate_coverage.yaml +++ b/.github/workflows/generate_coverage.yaml @@ -53,8 +53,22 @@ jobs: conda list - name: Build dpnp with coverage + id: build_coverage + uses: nick-fields/retry@v2.9.0 + with: + shell: bash + timeout_minutes: 60 + max_attempts: 5 + retry_on: error + command: | + . $CONDA/etc/profile.d/conda.sh + conda activate coverage + git clean -fxd + python scripts/gen_coverage.py --pytest-opts="--ignore tests/test_random.py" + + - name: Total number of coverage attempts run: | - python scripts/gen_coverage.py --pytest-opts="--ignore tests/test_random.py" + echo "Total number of coverage attempts made: ${{ steps.build_coverage.outputs.total_attempts }}" - name: Install coverall dependencies run: | From e2188ede3e85df0ce17670ec0c684e3ad2013251 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Tue, 12 Dec 2023 21:17:24 +0100 Subject: [PATCH 31/38] Align arguments of true_divide with divide (#1641) --- dpnp/dpnp_iface_mathematical.py | 37 ++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 70cc5e9cbac5..f6ca59e69070 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -254,6 +254,10 @@ def add( Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Equivalent to `x1` + `x2` in terms of array broadcasting. + Examples -------- >>> import dpnp as np @@ -784,6 +788,13 @@ def divide( Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Equivalent to `x1` / `x2` in terms of array-broadcasting. + + The ``true_divide(x1, x2)`` function is an alias for + ``divide(x1, x2)``. + Examples -------- >>> import dpnp as np @@ -1717,6 +1728,10 @@ def multiply( Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Equivalent to `x1` * `x2` in terms of array broadcasting. + Examples -------- >>> import dpnp as np @@ -2685,6 +2700,10 @@ def subtract( Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Equivalent to `x1` - `x2` in terms of array broadcasting. + Examples -------- >>> import dpnp as np @@ -2908,23 +2927,7 @@ def trapz(y1, x1=None, dx=1.0, axis=-1): return call_origin(numpy.trapz, y1, x1, dx, axis) -def true_divide(*args, **kwargs): - """ - Provide a true division of the inputs, element-wise. - - For full documentation refer to :obj:`numpy.true_divide`. - - See Also - -------- - :obj:`dpnp.divide` : Standard division. - - Notes - ----- - This function works the same as :obj:`dpnp.divide`. 
- - """ - - return dpnp.divide(*args, **kwargs) +true_divide = divide def trunc( From 74609d6315929139ef5782fdd9f12419d2b5813f Mon Sep 17 00:00:00 2001 From: vlad-perevezentsev Date: Wed, 13 Dec 2023 18:28:53 +0100 Subject: [PATCH 32/38] Add dpnp.linalg.solve() function (#1598) * Add dpnp.linalg.solve() function * Add cupy tests for dpnp.linalg.solve() * Register a LinAlgError in dpnp.linalg submodule * Implementation of dtype dispatching with _common_type for dpnp.linalg.solve * Add a common_helpers.hpp file * Add validation functions for array types and dimensions for linalg funcs * Skip test_solve_singular_empty --------- Co-authored-by: Anton <100830759+antonwolfy@users.noreply.github.com> --- .github/workflows/conda-package.yml | 1 + dpnp/backend/extensions/lapack/CMakeLists.txt | 1 + .../extensions/lapack/common_helpers.hpp | 55 ++++ dpnp/backend/extensions/lapack/gesv.cpp | 297 ++++++++++++++++++ dpnp/backend/extensions/lapack/gesv.hpp | 51 +++ dpnp/backend/extensions/lapack/lapack_py.cpp | 15 + .../extensions/lapack/linalg_exceptions.hpp | 54 ++++ .../extensions/lapack/types_matrix.hpp | 26 ++ dpnp/linalg/dpnp_iface_linalg.py | 61 +++- dpnp/linalg/dpnp_utils_linalg.py | 270 +++++++++++++++- tests/test_linalg.py | 114 ++++++- tests/test_sycl_queue.py | 24 ++ tests/test_usm_type.py | 38 +++ .../cupy/linalg_tests/test_solve.py | 90 ++++++ 14 files changed, 1093 insertions(+), 4 deletions(-) create mode 100644 dpnp/backend/extensions/lapack/common_helpers.hpp create mode 100644 dpnp/backend/extensions/lapack/gesv.cpp create mode 100644 dpnp/backend/extensions/lapack/gesv.hpp create mode 100644 dpnp/backend/extensions/lapack/linalg_exceptions.hpp create mode 100644 tests/third_party/cupy/linalg_tests/test_solve.py diff --git a/.github/workflows/conda-package.yml b/.github/workflows/conda-package.yml index 1a6650798e92..9b6b093801ac 100644 --- a/.github/workflows/conda-package.yml +++ b/.github/workflows/conda-package.yml @@ -31,6 +31,7 @@ env: test_usm_type.py third_party/cupy/core_tests third_party/cupy/linalg_tests/test_product.py + third_party/cupy/linalg_tests/test_solve.py third_party/cupy/logic_tests/test_comparison.py third_party/cupy/logic_tests/test_truth.py third_party/cupy/manipulation_tests/test_basic.py diff --git a/dpnp/backend/extensions/lapack/CMakeLists.txt b/dpnp/backend/extensions/lapack/CMakeLists.txt index 7679db38d6a7..d224c623c8cb 100644 --- a/dpnp/backend/extensions/lapack/CMakeLists.txt +++ b/dpnp/backend/extensions/lapack/CMakeLists.txt @@ -27,6 +27,7 @@ set(python_module_name _lapack_impl) set(_module_src ${CMAKE_CURRENT_SOURCE_DIR}/lapack_py.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/gesv.cpp ${CMAKE_CURRENT_SOURCE_DIR}/heevd.cpp ${CMAKE_CURRENT_SOURCE_DIR}/syevd.cpp ) diff --git a/dpnp/backend/extensions/lapack/common_helpers.hpp b/dpnp/backend/extensions/lapack/common_helpers.hpp new file mode 100644 index 000000000000..2f3815320cad --- /dev/null +++ b/dpnp/backend/extensions/lapack/common_helpers.hpp @@ -0,0 +1,55 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** + +#pragma once +#include +#include + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace lapack +{ +namespace helper +{ +template +struct value_type_of +{ + using type = T; +}; + +template +struct value_type_of> +{ + using type = T; +}; +} // namespace helper +} // namespace lapack +} // namespace ext +} // namespace backend +} // namespace dpnp diff --git a/dpnp/backend/extensions/lapack/gesv.cpp b/dpnp/backend/extensions/lapack/gesv.cpp new file mode 100644 index 000000000000..72e5aa806714 --- /dev/null +++ b/dpnp/backend/extensions/lapack/gesv.cpp @@ -0,0 +1,297 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** + +#include + +// dpctl tensor headers +#include "utils/memory_overlap.hpp" +#include "utils/type_utils.hpp" + +#include "common_helpers.hpp" +#include "gesv.hpp" +#include "linalg_exceptions.hpp" +#include "types_matrix.hpp" + +#include "dpnp_utils.hpp" + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace lapack +{ +namespace mkl_lapack = oneapi::mkl::lapack; +namespace py = pybind11; +namespace type_utils = dpctl::tensor::type_utils; + +typedef sycl::event (*gesv_impl_fn_ptr_t)(sycl::queue, + const std::int64_t, + const std::int64_t, + char *, + std::int64_t, + char *, + std::int64_t, + std::vector &, + const std::vector &); + +static gesv_impl_fn_ptr_t gesv_dispatch_vector[dpctl_td_ns::num_types]; + +template +static sycl::event gesv_impl(sycl::queue exec_q, + const std::int64_t n, + const std::int64_t nrhs, + char *in_a, + std::int64_t lda, + char *in_b, + std::int64_t ldb, + std::vector &host_task_events, + const std::vector &depends) +{ + type_utils::validate_type_for_device(exec_q); + + T *a = reinterpret_cast(in_a); + T *b = reinterpret_cast(in_b); + + const std::int64_t scratchpad_size = + mkl_lapack::gesv_scratchpad_size(exec_q, n, nrhs, lda, ldb); + T *scratchpad = nullptr; + + std::int64_t *ipiv = nullptr; + + std::stringstream error_msg; + std::int64_t info = 0; + bool sycl_exception_caught = false; + + sycl::event gesv_event; + try { + scratchpad = sycl::malloc_device(scratchpad_size, exec_q); + ipiv = sycl::malloc_device(n, exec_q); + + gesv_event = mkl_lapack::gesv( + exec_q, + n, // The order of the matrix A (0 ≤ n). + nrhs, // The number of right-hand sides B (0 ≤ nrhs). + a, // Pointer to the square coefficient matrix A (n x n). + lda, // The leading dimension of a, must be at least max(1, n). + ipiv, // The pivot indices that define the permutation matrix P; + // row i of the matrix was interchanged with row ipiv(i), + // must be at least max(1, n). + b, // Pointer to the right hand side matrix B (n x nrhs). + ldb, // The leading dimension of b, must be at least max(1, n). + scratchpad, // Pointer to scratchpad memory to be used by MKL + // routine for storing intermediate results. + scratchpad_size, depends); + } catch (mkl_lapack::exception const &e) { + info = e.info(); + + if (info < 0) { + error_msg << "Parameter number " << -info + << " had an illegal value."; + } + else if (info > 0) { + T host_U; + exec_q.memcpy(&host_U, &a[(info - 1) * lda + info - 1], sizeof(T)) + .wait(); + + using ThresholdType = typename helper::value_type_of::type; + + const auto threshold = + std::numeric_limits::epsilon() * 100; + if (std::abs(host_U) < threshold) { + sycl::free(scratchpad, exec_q); + throw LinAlgError("The input coefficient matrix is singular."); + } + else { + error_msg << "Unexpected MKL exception caught during gesv() " + "call:\nreason: " + << e.what() << "\ninfo: " << e.info(); + } + } + else if (info == scratchpad_size && e.detail() != 0) { + error_msg + << "Insufficient scratchpad size. 
Required size is at least " + << e.detail(); + } + else { + error_msg << "Unexpected MKL exception caught during gesv() " + "call:\nreason: " + << e.what() << "\ninfo: " << e.info(); + } + } catch (sycl::exception const &e) { + error_msg << "Unexpected SYCL exception caught during gesv() call:\n" + << e.what(); + sycl_exception_caught = true; + } + + if (info != 0 || sycl_exception_caught) // an unexpected error occurs + { + if (scratchpad != nullptr) { + sycl::free(scratchpad, exec_q); + } + if (ipiv != nullptr) { + sycl::free(ipiv, exec_q); + } + throw std::runtime_error(error_msg.str()); + } + + sycl::event clean_up_event = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(gesv_event); + auto ctx = exec_q.get_context(); + cgh.host_task([ctx, scratchpad, ipiv]() { + sycl::free(scratchpad, ctx); + sycl::free(ipiv, ctx); + }); + }); + host_task_events.push_back(clean_up_event); + + return gesv_event; +} + +std::pair + gesv(sycl::queue exec_q, + dpctl::tensor::usm_ndarray coeff_matrix, + dpctl::tensor::usm_ndarray dependent_vals, + const std::vector &depends) +{ + const int coeff_matrix_nd = coeff_matrix.get_ndim(); + const int dependent_vals_nd = dependent_vals.get_ndim(); + + if (coeff_matrix_nd != 2) { + throw py::value_error("The coefficient matrix has ndim=" + + std::to_string(coeff_matrix_nd) + + ", but a 2-dimensional array is expected."); + } + + if (dependent_vals_nd > 2) { + throw py::value_error( + "The dependent values array has ndim=" + + std::to_string(dependent_vals_nd) + + ", but a 1-dimensional or a 2-dimensional array is expected."); + } + + const py::ssize_t *coeff_matrix_shape = coeff_matrix.get_shape_raw(); + const py::ssize_t *dependent_vals_shape = dependent_vals.get_shape_raw(); + + if (coeff_matrix_shape[0] != coeff_matrix_shape[1]) { + throw py::value_error("The coefficient matrix must be square," + " but got a shape of (" + + std::to_string(coeff_matrix_shape[0]) + ", " + + std::to_string(coeff_matrix_shape[1]) + ")."); + } + + // check compatibility of execution queue and allocation queue + if (!dpctl::utils::queues_are_compatible(exec_q, + {coeff_matrix, dependent_vals})) + { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + if (overlap(coeff_matrix, dependent_vals)) { + throw py::value_error( + "The arrays of coefficients and dependent variables " + "are overlapping segments of memory"); + } + + bool is_coeff_matrix_f_contig = coeff_matrix.is_f_contiguous(); + if (!is_coeff_matrix_f_contig) { + throw py::value_error("The coefficient matrix " + "must be F-contiguous"); + } + + bool is_dependent_vals_f_contig = dependent_vals.is_f_contiguous(); + if (!is_dependent_vals_f_contig) { + throw py::value_error("The array of dependent variables " + "must be F-contiguous"); + } + + auto array_types = dpctl_td_ns::usm_ndarray_types(); + int coeff_matrix_type_id = + array_types.typenum_to_lookup_id(coeff_matrix.get_typenum()); + int dependent_vals_type_id = + array_types.typenum_to_lookup_id(dependent_vals.get_typenum()); + + if (coeff_matrix_type_id != dependent_vals_type_id) { + throw py::value_error("The types of the coefficient matrix and " + "dependent variables are mismatched"); + } + + gesv_impl_fn_ptr_t gesv_fn = gesv_dispatch_vector[coeff_matrix_type_id]; + if (gesv_fn == nullptr) { + throw py::value_error( + "No gesv implementation defined for the provided type " + "of the coefficient matrix."); + } + + char *coeff_matrix_data = 
coeff_matrix.get_data(); + char *dependent_vals_data = dependent_vals.get_data(); + + const std::int64_t n = coeff_matrix_shape[0]; + const std::int64_t m = dependent_vals_shape[0]; + const std::int64_t nrhs = + (dependent_vals_nd > 1) ? dependent_vals_shape[1] : 1; + + const std::int64_t lda = std::max(1UL, n); + const std::int64_t ldb = std::max(1UL, m); + + std::vector host_task_events; + sycl::event gesv_ev = + gesv_fn(exec_q, n, nrhs, coeff_matrix_data, lda, dependent_vals_data, + ldb, host_task_events, depends); + + sycl::event args_ev = dpctl::utils::keep_args_alive( + exec_q, {coeff_matrix, dependent_vals}, host_task_events); + + return std::make_pair(args_ev, gesv_ev); +} + +template +struct GesvContigFactory +{ + fnT get() + { + if constexpr (types::GesvTypePairSupportFactory::is_defined) { + return gesv_impl; + } + else { + return nullptr; + } + } +}; + +void init_gesv_dispatch_vector(void) +{ + dpctl_td_ns::DispatchVectorBuilder + contig; + contig.populate_dispatch_vector(gesv_dispatch_vector); +} +} // namespace lapack +} // namespace ext +} // namespace backend +} // namespace dpnp diff --git a/dpnp/backend/extensions/lapack/gesv.hpp b/dpnp/backend/extensions/lapack/gesv.hpp new file mode 100644 index 000000000000..24ac0d2e5be1 --- /dev/null +++ b/dpnp/backend/extensions/lapack/gesv.hpp @@ -0,0 +1,51 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** + +#pragma once + +#include +#include + +#include + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace lapack +{ +extern std::pair + gesv(sycl::queue exec_q, + dpctl::tensor::usm_ndarray coeff_matrix, + dpctl::tensor::usm_ndarray dependent_vals, + const std::vector &depends); + +extern void init_gesv_dispatch_vector(void); +} // namespace lapack +} // namespace ext +} // namespace backend +} // namespace dpnp diff --git a/dpnp/backend/extensions/lapack/lapack_py.cpp b/dpnp/backend/extensions/lapack/lapack_py.cpp index 97b67d59e24e..c0765be7509d 100644 --- a/dpnp/backend/extensions/lapack/lapack_py.cpp +++ b/dpnp/backend/extensions/lapack/lapack_py.cpp @@ -30,7 +30,9 @@ #include #include +#include "gesv.hpp" #include "heevd.hpp" +#include "linalg_exceptions.hpp" #include "syevd.hpp" namespace lapack_ext = dpnp::backend::ext::lapack; @@ -39,6 +41,7 @@ namespace py = pybind11; // populate dispatch vectors void init_dispatch_vectors(void) { + lapack_ext::init_gesv_dispatch_vector(); lapack_ext::init_syevd_dispatch_vector(); } @@ -50,9 +53,21 @@ void init_dispatch_tables(void) PYBIND11_MODULE(_lapack_impl, m) { + // Register a custom LinAlgError exception in the dpnp.linalg submodule + py::module_ linalg_module = py::module_::import("dpnp.linalg"); + py::register_exception( + linalg_module, "LinAlgError", PyExc_ValueError); + init_dispatch_vectors(); init_dispatch_tables(); + m.def("_gesv", &lapack_ext::gesv, + "Call `gesv` from OneMKL LAPACK library to return " + "the solution of a system of linear equations with " + "a square coefficient matrix A and multiple dependent variables", + py::arg("sycl_queue"), py::arg("coeff_matrix"), + py::arg("dependent_vals"), py::arg("depends") = py::list()); + m.def("_heevd", &lapack_ext::heevd, "Call `heevd` from OneMKL LAPACK library to return " "the eigenvalues and eigenvectors of a complex Hermitian matrix", diff --git a/dpnp/backend/extensions/lapack/linalg_exceptions.hpp b/dpnp/backend/extensions/lapack/linalg_exceptions.hpp new file mode 100644 index 000000000000..083be22429c0 --- /dev/null +++ b/dpnp/backend/extensions/lapack/linalg_exceptions.hpp @@ -0,0 +1,54 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** + +#pragma once +#include +#include + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace lapack +{ +class LinAlgError : public std::exception +{ +public: + explicit LinAlgError(const char *message) : msg_(message) {} + + const char *what() const noexcept override + { + return msg_.c_str(); + } + +private: + std::string msg_; +}; +} // namespace lapack +} // namespace ext +} // namespace backend +} // namespace dpnp diff --git a/dpnp/backend/extensions/lapack/types_matrix.hpp b/dpnp/backend/extensions/lapack/types_matrix.hpp index 3cab18d3c63d..60521cb75a3c 100644 --- a/dpnp/backend/extensions/lapack/types_matrix.hpp +++ b/dpnp/backend/extensions/lapack/types_matrix.hpp @@ -43,6 +43,32 @@ namespace lapack { namespace types { +/** + * @brief A factory to define pairs of supported types for which + * MKL LAPACK library provides support in oneapi::mkl::lapack::gesv + * function. + * + * @tparam T Type of array containing the coefficient matrix A and + * the array of multiple dependent variables. Upon execution, the array of + * multiple dependent variables will be overwritten with the solution. + */ +template +struct GesvTypePairSupportFactory +{ + static constexpr bool is_defined = std::disjunction< + dpctl_td_ns::TypePairDefinedEntry, + dpctl_td_ns::TypePairDefinedEntry, + dpctl_td_ns::TypePairDefinedEntry, + T, + std::complex>, + dpctl_td_ns::TypePairDefinedEntry, + T, + std::complex>, + // fall-through + dpctl_td_ns::NotDefinedEntry>::is_defined; +}; /** * @brief A factory to define pairs of supported types for which * MKL LAPACK library provides support in oneapi::mkl::lapack::heevd diff --git a/dpnp/linalg/dpnp_iface_linalg.py b/dpnp/linalg/dpnp_iface_linalg.py index c7437b30da60..c9c9c855728a 100644 --- a/dpnp/linalg/dpnp_iface_linalg.py +++ b/dpnp/linalg/dpnp_iface_linalg.py @@ -47,7 +47,12 @@ from dpnp.dpnp_utils import * from dpnp.linalg.dpnp_algo_linalg import * -from .dpnp_utils_linalg import dpnp_eigh +from .dpnp_utils_linalg import ( + check_stacked_2d, + check_stacked_square, + dpnp_eigh, + dpnp_solve, +) __all__ = [ "cholesky", @@ -62,6 +67,7 @@ "multi_dot", "norm", "qr", + "solve", "svd", ] @@ -499,6 +505,59 @@ def qr(x1, mode="reduced"): return call_origin(numpy.linalg.qr, x1, mode) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + For full documentation refer to :obj:`numpy.linalg.solve`. + + Returns + ------- + out : {(…, M,), (…, M, K)} dpnp.ndarray + Solution to the system ax = b. Returned shape is identical to b. + + Limitations + ----------- + Parameters `a` and `b` are supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.dot` : Returns the dot product of two arrays. 
+ + Examples + -------- + >>> import dpnp as dp + >>> a = dp.array([[1, 2], [3, 5]]) + >>> b = dp.array([1, 2]) + >>> x = dp.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> dp.allclose(dp.dot(a, x), b) + array([ True]) + + """ + + dpnp.check_supported_arrays_type(a, b) + check_stacked_2d(a) + check_stacked_square(a) + + if not ( + (a.ndim == b.ndim or a.ndim == b.ndim + 1) + and a.shape[:-1] == b.shape[: a.ndim - 1] + ): + raise dpnp.linalg.LinAlgError( + "a must have (..., M, M) shape and b must have (..., M) " + "or (..., M, K)" + ) + + return dpnp_solve(a, b) + + def svd(x1, full_matrices=True, compute_uv=True, hermitian=False): """ Singular Value Decomposition. diff --git a/dpnp/linalg/dpnp_utils_linalg.py b/dpnp/linalg/dpnp_utils_linalg.py index 54c01c20248e..a3a2802072c9 100644 --- a/dpnp/linalg/dpnp_utils_linalg.py +++ b/dpnp/linalg/dpnp_utils_linalg.py @@ -28,16 +28,152 @@ import dpctl.tensor._tensor_impl as ti +from numpy import issubdtype import dpnp import dpnp.backend.extensions.lapack._lapack_impl as li +from dpnp.dpnp_utils import get_usm_allocations -__all__ = ["dpnp_eigh"] +__all__ = [ + "check_stacked_2d", + "check_stacked_square", + "dpnp_eigh", + "dpnp_solve", +] _jobz = {"N": 0, "V": 1} _upper_lower = {"U": 0, "L": 1} +def check_stacked_2d(*arrays): + """ + Return ``True`` if each array in `arrays` has at least two dimensions. + + If any array is less than two-dimensional, `dpnp.linalg.LinAlgError` will be raised. + + Parameters + ---------- + arrays : {dpnp_array, usm_ndarray} + A sequence of input arrays to check for dimensionality. + + Returns + ------- + out : bool + ``True`` if each array in `arrays` is at least two-dimensional. + + Raises + ------ + dpnp.linalg.LinAlgError + If any array in `arrays` is less than two-dimensional. + + """ + + for a in arrays: + if a.ndim < 2: + raise dpnp.linalg.LinAlgError( + f"{a.ndim}-dimensional array given. The input " + "array must be at least two-dimensional" + ) + + +def check_stacked_square(*arrays): + """ + Return ``True`` if each array in `arrays` is a square matrix. + + If any array does not form a square matrix, `dpnp.linalg.LinAlgError` will be raised. + + Precondition: `arrays` are at least 2d. The caller should assert it + beforehand. For example, + + >>> def solve(a): + ... check_stacked_2d(a) + ... check_stacked_square(a) + ... ... + + Parameters + ---------- + arrays : {dpnp_array, usm_ndarray} + A sequence of input arrays to check for square matrix shape. + + Returns + ------- + out : bool + ``True`` if each array in `arrays` forms a square matrix. + + Raises + ------ + dpnp.linalg.LinAlgError + If any array in `arrays` does not form a square matrix. + + """ + + for a in arrays: + m, n = a.shape[-2:] + if m != n: + raise dpnp.linalg.LinAlgError( + "Last 2 dimensions of the input array must be square" + ) + + +def _common_type(*arrays): + """ + _common_type(*arrays) + + Common type for linear algebra operations. + + This function determines the common data type for linalg operations. + It's designed to be similar in logic to `numpy.linalg.linalg._commonType`. + + Key differences from `numpy.common_type`: + - It accepts ``bool_`` arrays. + - The default floating-point data type is determined by the capabilities of the device + on which `arrays` are created, as indicated by `dpnp.default_float_type()`. + + Args: + *arrays (dpnp.ndarray): Input arrays. + + Returns: + dtype_common (dtype): The common data type for linalg operations. 
+ + This returned value is applicable both as the precision to be used + in linalg calls and as the dtype of (possibly complex) output(s). + + """ + + dtypes = [arr.dtype for arr in arrays] + + default = dpnp.default_float_type(device=arrays[0].device) + dtype_common = _common_inexact_type(default, *dtypes) + + return dtype_common + + +def _common_inexact_type(default_dtype, *dtypes): + """ + _common_inexact_type(default_dtype, *dtypes) + + Determines the common 'inexact' data type for linear algebra operations. + + This function selects an 'inexact' data type appropriate for the device's capabilities. + It defaults to `default_dtype` when provided types are not 'inexact'. + + Args: + default_dtype: The default data type. This is determined by the capabilities of + the device and is used when none of the provided types are 'inexact'. + *dtypes: A variable number of data types to be evaluated to find + the common 'inexact' type. + + Returns: + dpnp.result_type (dtype) : The resultant 'inexact' data type for linalg operations, + ensuring computational compatibility. + + """ + inexact_dtypes = [ + dt if issubdtype(dt, dpnp.inexact) else default_dtype for dt in dtypes + ] + return dpnp.result_type(*inexact_dtypes) + + def dpnp_eigh(a, UPLO): """ dpnp_eigh(a, UPLO) @@ -164,3 +300,135 @@ def dpnp_eigh(a, UPLO): ht_copy_ev.wait() return w, out_v + + +def dpnp_solve(a, b): + """ + dpnp_solve(a, b) + + Return the solution to the system of linear equations with + a square coefficient matrix `a` and multiple dependent variables + array `b`. + + """ + + a_usm_arr = dpnp.get_usm_ndarray(a) + b_usm_arr = dpnp.get_usm_ndarray(b) + + b_order = "C" if b.flags.c_contiguous else "F" + a_shape = a.shape + b_shape = b.shape + + res_usm_type, exec_q = get_usm_allocations([a, b]) + + res_type = _common_type(a, b) + if b.size == 0: + return dpnp.empty_like(b, dtype=res_type, usm_type=res_usm_type) + + if a.ndim > 2: + reshape = False + orig_shape_b = b_shape + if a.ndim > 3: + # get 3d input arrays by reshape + if a.ndim == b.ndim: + b = b.reshape(-1, b_shape[-2], b_shape[-1]) + else: + b = b.reshape(-1, b_shape[-1]) + + a = a.reshape(-1, a_shape[-2], a_shape[-1]) + + a_usm_arr = dpnp.get_usm_ndarray(a) + b_usm_arr = dpnp.get_usm_ndarray(b) + reshape = True + + batch_size = a.shape[0] + + coeff_vecs = [None] * batch_size + val_vecs = [None] * batch_size + a_ht_copy_ev = [None] * batch_size + b_ht_copy_ev = [None] * batch_size + ht_lapack_ev = [None] * batch_size + + for i in range(batch_size): + # oneMKL LAPACK assumes fortran-like array as input, so + # allocate a memory with 'F' order for dpnp array of coefficient matrix + # and multiple dependent variables array + coeff_vecs[i] = dpnp.empty_like( + a[i], order="F", dtype=res_type, usm_type=res_usm_type + ) + val_vecs[i] = dpnp.empty_like( + b[i], order="F", dtype=res_type, usm_type=res_usm_type + ) + + # use DPCTL tensor function to fill the coefficient matrix array + # and the array of multiple dependent variables with content + # from the input arrays + a_ht_copy_ev[i], a_copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=a_usm_arr[i], + dst=coeff_vecs[i].get_array(), + sycl_queue=a.sycl_queue, + ) + b_ht_copy_ev[i], b_copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=b_usm_arr[i], + dst=val_vecs[i].get_array(), + sycl_queue=b.sycl_queue, + ) + + # Call the LAPACK extension function _gesv to solve the system of linear + # equations using a portion of the coefficient square matrix and a + # corresponding portion of the dependent variables array. 
+ ht_lapack_ev[i], _ = li._gesv( + exec_q, + coeff_vecs[i].get_array(), + val_vecs[i].get_array(), + depends=[a_copy_ev, b_copy_ev], + ) + + for i in range(batch_size): + ht_lapack_ev[i].wait() + b_ht_copy_ev[i].wait() + a_ht_copy_ev[i].wait() + + # combine the list of solutions into a single array + out_v = dpnp.array( + val_vecs, order=b_order, dtype=res_type, usm_type=res_usm_type + ) + if reshape: + # shape of the out_v must be equal to the shape of the array of + # dependent variables + out_v = out_v.reshape(orig_shape_b) + return out_v + else: + # oneMKL LAPACK gesv overwrites `a` and `b` and assumes fortran-like array as input. + # Allocate 'F' order memory for dpnp arrays to comply with these requirements. + a_f = dpnp.empty_like( + a, order="F", dtype=res_type, usm_type=res_usm_type + ) + + # use DPCTL tensor function to fill the coefficient matrix array + # with content from the input array `a` + a_ht_copy_ev, a_copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=a_usm_arr, dst=a_f.get_array(), sycl_queue=a.sycl_queue + ) + + b_f = dpnp.empty_like( + b, order="F", dtype=res_type, usm_type=res_usm_type + ) + + # use DPCTL tensor function to fill the array of multiple dependent variables + # with content from the input array `b` + b_ht_copy_ev, b_copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=b_usm_arr, dst=b_f.get_array(), sycl_queue=b.sycl_queue + ) + + # Call the LAPACK extension function _gesv to solve the system of linear + # equations with the coefficient square matrix and the dependent variables array. + ht_lapack_ev, _ = li._gesv( + exec_q, a_f.get_array(), b_f.get_array(), [a_copy_ev, b_copy_ev] + ) + + ht_lapack_ev.wait() + b_ht_copy_ev.wait() + a_ht_copy_ev.wait() + + return b_f diff --git a/tests/test_linalg.py b/tests/test_linalg.py index 7ce774d0abd6..6c8c2a5f04e3 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -1,11 +1,16 @@ import dpctl import numpy import pytest -from numpy.testing import assert_allclose, assert_array_equal +from numpy.testing import assert_allclose, assert_array_equal, assert_raises import dpnp as inp -from .helper import get_all_dtypes, get_complex_dtypes, has_support_aspect64 +from .helper import ( + assert_dtype_allclose, + get_all_dtypes, + has_support_aspect64, + is_cpu_device, +) def vvsort(val, vec, size, xp): @@ -508,3 +513,108 @@ def test_svd(type, shape): assert_allclose( inp.asnumpy(dpnp_vt)[i, :], np_vt[i, :], rtol=tol, atol=tol ) + + +class TestSolve: + @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) + def test_solve(self, dtype): + a_np = numpy.array([[1, 0.5], [0.5, 1]], dtype=dtype) + a_dp = inp.array(a_np) + + expected = numpy.linalg.solve(a_np, a_np) + result = inp.linalg.solve(a_dp, a_dp) + + assert_allclose(expected, result, rtol=1e-06) + + @pytest.mark.parametrize("a_dtype", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize("b_dtype", get_all_dtypes(no_bool=True)) + def test_solve_diff_type(self, a_dtype, b_dtype): + a_np = numpy.array([[1, 2], [3, -5]], dtype=a_dtype) + b_np = numpy.array([4, 1], dtype=b_dtype) + + a_dp = inp.array(a_np) + b_dp = inp.array(b_np) + + expected = numpy.linalg.solve(a_np, b_np) + result = inp.linalg.solve(a_dp, b_dp) + + assert_dtype_allclose(result, expected) + + def test_solve_strides(self): + a_np = numpy.array( + [ + [2, 3, 1, 4, 5], + [5, 6, 7, 8, 9], + [9, 7, 7, 2, 3], + [1, 4, 5, 1, 8], + [8, 9, 8, 5, 3], + ] + ) + b_np = numpy.array([5, 8, 9, 2, 1]) + + a_dp = inp.array(a_np) + b_dp = inp.array(b_np) + + # positive strides + expected = 
numpy.linalg.solve(a_np[::2, ::2], b_np[::2]) + result = inp.linalg.solve(a_dp[::2, ::2], b_dp[::2]) + assert_allclose(expected, result, rtol=1e-05) + + # negative strides + expected = numpy.linalg.solve(a_np[::-2, ::-2], b_np[::-2]) + result = inp.linalg.solve(a_dp[::-2, ::-2], b_dp[::-2]) + assert_allclose(expected, result, rtol=1e-05) + + # TODO: remove skipif when MKLD-16626 is resolved + @pytest.mark.skipif(is_cpu_device(), reason="MKLD-16626") + @pytest.mark.parametrize( + "matrix, vector", + [ + ([[1, 2], [2, 4]], [1, 2]), + ([[0, 0], [0, 0]], [0, 0]), + ([[1, 1], [1, 1]], [2, 2]), + ([[2, 4], [1, 2]], [3, 1.5]), + ([[1, 2], [0, 0]], [3, 0]), + ([[1, 0], [2, 0]], [3, 4]), + ], + ids=[ + "Linearly dependent rows", + "Zero matrix", + "Identical rows", + "Linearly dependent columns", + "Zero row", + "Zero column", + ], + ) + def test_solve_singular_matrix(self, matrix, vector): + a_np = numpy.array(matrix, dtype="float32") + b_np = numpy.array(vector, dtype="float32") + + a_dp = inp.array(a_np) + b_dp = inp.array(b_np) + + assert_raises(numpy.linalg.LinAlgError, numpy.linalg.solve, a_np, b_np) + assert_raises(inp.linalg.LinAlgError, inp.linalg.solve, a_dp, b_dp) + + def test_solve_errors(self): + a_dp = inp.array([[1, 0.5], [0.5, 1]], dtype="float32") + b_dp = inp.array(a_dp, dtype="float32") + + # diffetent queue + a_queue = dpctl.SyclQueue() + b_queue = dpctl.SyclQueue() + a_dp_q = inp.array(a_dp, sycl_queue=a_queue) + b_dp_q = inp.array(b_dp, sycl_queue=b_queue) + assert_raises(ValueError, inp.linalg.solve, a_dp_q, b_dp_q) + + # unsupported type + a_np = inp.asnumpy(a_dp) + b_np = inp.asnumpy(b_dp) + assert_raises(TypeError, inp.linalg.solve, a_np, b_dp) + assert_raises(TypeError, inp.linalg.solve, a_dp, b_np) + + # a.ndim < 2 + a_dp_ndim_1 = a_dp.flatten() + assert_raises( + inp.linalg.LinAlgError, inp.linalg.solve, a_dp_ndim_1, b_dp + ) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 3c658c14fe52..19104d7bd434 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -1345,3 +1345,27 @@ def test_take(func, device): expected_queue = dpnp_data.get_array().sycl_queue result_queue = result.get_array().sycl_queue assert_sycl_queue_equal(result_queue, expected_queue) + + +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_solve(device): + x = [[1.0, 2.0], [3.0, 5.0]] + y = [1.0, 2.0] + + numpy_x = numpy.array(x) + numpy_y = numpy.array(y) + dpnp_x = dpnp.array(x, device=device) + dpnp_y = dpnp.array(y, device=device) + + result = dpnp.linalg.solve(dpnp_x, dpnp_y) + expected = numpy.linalg.solve(numpy_x, numpy_y) + assert_dtype_allclose(result, expected) + + result_queue = result.sycl_queue + + assert_sycl_queue_equal(result_queue, dpnp_x.sycl_queue) + assert_sycl_queue_equal(result_queue, dpnp_y.sycl_queue) diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 4982ed424140..fa2931cc5054 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -486,3 +486,41 @@ def test_take(func, usm_type_x, usm_type_ind): assert x.usm_type == usm_type_x assert ind.usm_type == usm_type_ind assert z.usm_type == du.get_coerced_usm_type([usm_type_x, usm_type_ind]) + + +@pytest.mark.parametrize( + "usm_type_matrix", list_of_usm_types, ids=list_of_usm_types +) +@pytest.mark.parametrize( + "usm_type_vector", list_of_usm_types, ids=list_of_usm_types +) +@pytest.mark.parametrize( + "matrix, vector", + [ + ([[1, 2], [3, 5]], dp.empty((2, 0))), + ([[1, 2], [3, 5]], [1, 2]), + ( + 
[ + [[1, 1, 1], [0, 2, 5], [2, 5, -1]], + [[3, -1, 1], [1, 2, 3], [2, 3, 1]], + [[1, 4, 1], [1, 2, -2], [4, 1, 2]], + ], + [[6, -4, 27], [9, -6, 15], [15, 1, 11]], + ), + ], + ids=[ + "2D_Matrix_Empty_Vector", + "2D_Matrix_1D_Vector", + "3D_Matrix_and_Vectors", + ], +) +def test_solve(matrix, vector, usm_type_matrix, usm_type_vector): + x = dp.array(matrix, usm_type=usm_type_matrix) + y = dp.array(vector, usm_type=usm_type_vector) + z = dp.linalg.solve(x, y) + + assert x.usm_type == usm_type_matrix + assert y.usm_type == usm_type_vector + assert z.usm_type == du.get_coerced_usm_type( + [usm_type_matrix, usm_type_vector] + ) diff --git a/tests/third_party/cupy/linalg_tests/test_solve.py b/tests/third_party/cupy/linalg_tests/test_solve.py new file mode 100644 index 000000000000..6194cf6b8ac4 --- /dev/null +++ b/tests/third_party/cupy/linalg_tests/test_solve.py @@ -0,0 +1,90 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from tests.helper import has_support_aspect64 +from tests.third_party.cupy import testing + + +@testing.parameterize( + *testing.product( + { + "order": ["C", "F"], + } + ) +) +class TestSolve(unittest.TestCase): + # TODO: add get_batched_gesv_limit + # def setUp(self): + # if self.batched_gesv_limit is not None: + # self.old_limit = get_batched_gesv_limit() + # set_batched_gesv_limit(self.batched_gesv_limit) + + # def tearDown(self): + # if self.batched_gesv_limit is not None: + # set_batched_gesv_limit(self.old_limit) + + @testing.for_dtypes("ifdFD") + @testing.numpy_cupy_allclose( + atol=1e-3, contiguous_check=False, type_check=has_support_aspect64() + ) + def check_x(self, a_shape, b_shape, xp, dtype): + a = testing.shaped_random(a_shape, xp, dtype=dtype, seed=0, scale=20) + b = testing.shaped_random(b_shape, xp, dtype=dtype, seed=1) + a = a.copy(order=self.order) + b = b.copy(order=self.order) + a_copy = a.copy() + b_copy = b.copy() + result = xp.linalg.solve(a, b) + numpy.testing.assert_array_equal(a_copy, a) + numpy.testing.assert_array_equal(b_copy, b) + return result + + def test_solve(self): + self.check_x((4, 4), (4,)) + self.check_x((5, 5), (5, 2)) + self.check_x((2, 4, 4), (2, 4)) + self.check_x((2, 5, 5), (2, 5, 2)) + self.check_x((2, 3, 2, 2), (2, 3, 2)) + self.check_x((2, 3, 3, 3), (2, 3, 3, 2)) + self.check_x((0, 0), (0,)) + self.check_x((0, 0), (0, 2)) + self.check_x((0, 2, 2), (0, 2)) + self.check_x((0, 2, 2), (0, 2, 3)) + + def check_shape(self, a_shape, b_shape, error_types): + for xp, error_type in error_types.items(): + a = xp.random.rand(*a_shape) + b = xp.random.rand(*b_shape) + with pytest.raises(error_type): + xp.linalg.solve(a, b) + + # Undefined behavior is implementation-dependent: + # Numpy with OpenBLAS returns an empty array + # while numpy with OneMKL raises LinAlgError + @pytest.mark.skip("Undefined behavior") + def test_solve_singular_empty(self, xp): + a = xp.zeros((3, 3)) # singular + b = xp.empty((3, 0)) # nrhs = 0 + # LinAlgError("Singular matrix") is not raised + return xp.linalg.solve(a, b) + + def test_invalid_shape(self): + linalg_errors = { + numpy: numpy.linalg.LinAlgError, + cupy: cupy.linalg.LinAlgError, + } + value_errors = { + numpy: ValueError, + cupy: ValueError, + } + + self.check_shape((2, 3), (4,), linalg_errors) + self.check_shape((3, 3), (2,), value_errors) + self.check_shape((3, 3), (2, 2), value_errors) + self.check_shape((3, 3, 4), (3,), linalg_errors) + self.check_shape((2, 3, 3), (3,), value_errors) + self.check_shape((3, 3), (0,), value_errors) + self.check_shape((0, 3, 4), (3,), 
linalg_errors)
From 996cf5405e4800cfb28fa2690477826257c76d70 Mon Sep 17 00:00:00 2001
From: vtavana <120411540+vtavana@users.noreply.github.com>
Date: Sat, 16 Dec 2023 05:24:10 -0600
Subject: [PATCH 33/38] Implement dpnp.std, dpnp.var and dpnp.nanvar (#1635)

* implement dpnp.var and dpnp.std
* implement dpnp.nanvar
* support out keyword for dpnp.sum
* add more tests
* use replace_nan func for nanprod
* address comments
* update test_sum_empty_axis
* address new comments
* minor update
* address remarks
* remove uncalled raise

---
 dpnp/backend/include/dpnp_iface_fptr.hpp      |  34 +-
 dpnp/backend/kernels/dpnp_krnl_statistics.cpp |  61 ---
 dpnp/dpnp_algo/dpnp_algo.pxd                  |   6 -
 dpnp/dpnp_algo/dpnp_algo_statistics.pxi       | 113 -----
 dpnp/dpnp_array.py                            |  52 ++-
 dpnp/dpnp_iface.py                            |  26 +-
 dpnp/dpnp_iface_manipulation.py               |   2 -
 dpnp/dpnp_iface_mathematical.py               | 213 +--------
 dpnp/dpnp_iface_nanfunctions.py               | 418 ++++++++++++++++++
 dpnp/dpnp_iface_statistics.py                 | 347 ++++++++++-----
 tests/conftest.py                             |  15 +
 tests/skipped_tests.tbl                       |  29 --
 tests/skipped_tests_gpu.tbl                   |  29 --
 tests/skipped_tests_gpu_no_fp64.tbl           |  12 -
 tests/test_mathematical.py                    |   5 +-
 tests/test_statistics.py                      | 371 +++++++++++++---
 tests/test_sum.py                             |  22 +
 tests/test_sycl_queue.py                      |   3 +
 tests/test_usm_type.py                        |   3 +
 .../cupy/math_tests/test_sumprod.py           | 110 ++---
 .../cupy/statistics_tests/test_meanvar.py     |  65 ++-
 21 files changed, 1144 insertions(+), 792 deletions(-)
 create mode 100644 dpnp/dpnp_iface_nanfunctions.py

diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp
index 6a174b3b647e..8b1f4c48a111 100644
--- a/dpnp/backend/include/dpnp_iface_fptr.hpp
+++ b/dpnp/backend/include/dpnp_iface_fptr.hpp
@@ -216,8 +216,6 @@ enum class DPNPFuncName : size_t
     DPNP_FN_MULTIPLY_EXT, /**< Used in numpy.multiply() impl, requires extra
                              parameters */
     DPNP_FN_NANVAR, /**< Used in numpy.nanvar() impl */
-    DPNP_FN_NANVAR_EXT, /**< Used in numpy.nanvar() impl, requires extra
-                           parameters */
     DPNP_FN_NEGATIVE, /**< Used in numpy.negative() impl */
     DPNP_FN_NONZERO, /**< Used in numpy.nonzero() impl */
     DPNP_FN_ONES, /**< Used in numpy.ones() impl */
@@ -374,8 +372,7 @@ enum class DPNPFuncName : size_t
                         */
     DPNP_FN_SQUARE, /**< Used in numpy.square() impl */
     DPNP_FN_STD, /**< Used in numpy.std() impl */
-    DPNP_FN_STD_EXT, /**< Used in numpy.std() impl, requires extra parameters */
-    DPNP_FN_SUBTRACT, /**< Used in numpy.subtract() impl */
+    DPNP_FN_SUBTRACT, /**< Used in numpy.subtract() impl */
     DPNP_FN_SUBTRACT_EXT, /**< Used in numpy.subtract() impl, requires extra
                              parameters */
     DPNP_FN_SUM, /**< Used in numpy.sum() impl */
@@ -386,21 +383,20 @@ enum class DPNPFuncName : size_t
     DPNP_FN_TAKE, /**< Used in numpy.take() impl */
     DPNP_FN_TAN, /**< Used in numpy.tan() impl */
     DPNP_FN_TANH, /**< Used in numpy.tanh() impl */
-    DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */
-    DPNP_FN_TRACE, /**< Used in numpy.trace() impl */
-    DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra
-                          parameters */
-    DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */
-    DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra
-                          parameters */
-    DPNP_FN_TRI, /**< Used in numpy.tri() impl */
-    DPNP_FN_TRIL, /**< Used in numpy.tril() impl */
-    DPNP_FN_TRIU, /**< Used in numpy.triu() impl */
-    DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */
-    DPNP_FN_VANDER, /**< Used in numpy.vander() impl */
-    DPNP_FN_VAR, /**< Used in numpy.var() impl */
-    DPNP_FN_VAR_EXT, /**< Used in numpy.var() impl, requires extra parameters */
-    
DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ + DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ + DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ + DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra + parameters */ + DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ + DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra + parameters */ + DPNP_FN_TRI, /**< Used in numpy.tri() impl */ + DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ + DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ + DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ + DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ + DPNP_FN_VAR, /**< Used in numpy.var() impl */ + DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ DPNP_FN_ZEROS_LIKE, /**< Used in numpy.zeros_like() impl */ DPNP_FN_LAST, /**< The latest element of the enumeration */ }; diff --git a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp index 5c0ca1f6591b..8f685c97cb38 100644 --- a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp @@ -939,16 +939,6 @@ template void (*dpnp_nanvar_default_c)(void *, void *, void *, const size_t, size_t) = dpnp_nanvar_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_nanvar_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - void *, - const size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_nanvar_c<_DataType>; - template DPCTLSyclEventRef dpnp_std_c(DPCTLSyclQueueRef q_ref, void *array1_in, @@ -1039,18 +1029,6 @@ void (*dpnp_std_default_c)(void *, size_t, size_t) = dpnp_std_c<_DataType, _ResultType>; -template -DPCTLSyclEventRef (*dpnp_std_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const shape_elem_type *, - size_t, - const shape_elem_type *, - size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_std_c<_DataType, _ResultType>; - template class dpnp_var_c_kernel; @@ -1150,18 +1128,6 @@ void (*dpnp_var_default_c)(void *, size_t, size_t) = dpnp_var_c<_DataType, _ResultType>; -template -DPCTLSyclEventRef (*dpnp_var_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const shape_elem_type *, - size_t, - const shape_elem_type *, - size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_var_c<_DataType, _ResultType>; - void func_map_init_statistics(func_map_t &fmap) { fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_INT][eft_INT] = { @@ -1316,15 +1282,6 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_NANVAR][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_nanvar_default_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_std_default_c}; fmap[DPNPFuncName::DPNP_FN_STD][eft_LNG][eft_LNG] = { @@ -1334,15 +1291,6 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_STD][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_std_default_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_INT][eft_INT] = { - eft_DBL, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_LNG][eft_LNG] = { - eft_DBL, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_std_ext_c}; - 
fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_var_default_c}; fmap[DPNPFuncName::DPNP_FN_VAR][eft_LNG][eft_LNG] = { @@ -1352,14 +1300,5 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_VAR][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_var_default_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_INT][eft_INT] = { - eft_DBL, (void *)dpnp_var_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_LNG][eft_LNG] = { - eft_DBL, (void *)dpnp_var_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_var_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_var_ext_c}; - return; } diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index d49adcf0b7fc..18813e3e04c8 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -100,8 +100,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_MINIMUM_EXT DPNP_FN_MODF DPNP_FN_MODF_EXT - DPNP_FN_NANVAR - DPNP_FN_NANVAR_EXT DPNP_FN_NONZERO DPNP_FN_ONES DPNP_FN_ONES_LIKE @@ -187,8 +185,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_SEARCHSORTED_EXT DPNP_FN_SORT DPNP_FN_SORT_EXT - DPNP_FN_STD - DPNP_FN_STD_EXT DPNP_FN_SUM DPNP_FN_SUM_EXT DPNP_FN_SVD @@ -202,8 +198,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_TRIL_EXT DPNP_FN_TRIU DPNP_FN_TRIU_EXT - DPNP_FN_VAR - DPNP_FN_VAR_EXT DPNP_FN_ZEROS DPNP_FN_ZEROS_LIKE diff --git a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi index 34e0684fcbfc..37d51d131ffe 100644 --- a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi @@ -39,78 +39,15 @@ __all__ += [ "dpnp_average", "dpnp_correlate", "dpnp_median", - "dpnp_nanvar", - "dpnp_std", - "dpnp_var", ] -# C function pointer to the C library template functions -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_custom_cov_1in_1out_t)(c_dpctl.DPCTLSyclQueueRef, - void *, void * , size_t, size_t, - const c_dpctl.DPCTLEventVectorRef) -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_custom_nanvar_t)(c_dpctl.DPCTLSyclQueueRef, - void *, void * , void * , size_t, size_t, - const c_dpctl.DPCTLEventVectorRef) -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_custom_std_var_1in_1out_t)(c_dpctl.DPCTLSyclQueueRef, - void *, void * , shape_elem_type * , size_t, - shape_elem_type * , size_t, size_t, - const c_dpctl.DPCTLEventVectorRef) - # C function pointer to the C library template functions ctypedef c_dpctl.DPCTLSyclEventRef(*custom_statistic_1in_1out_func_ptr_t)(c_dpctl.DPCTLSyclQueueRef, void *, void * , shape_elem_type * , size_t, shape_elem_type * , size_t, const c_dpctl.DPCTLEventVectorRef) -cdef utils.dpnp_descriptor call_fptr_custom_std_var_1in_1out(DPNPFuncName fptr_name, utils.dpnp_descriptor x1, ddof): - cdef shape_type_c x1_shape = x1.shape - - """ Convert string type names (array.dtype) to C enum DPNPFuncType """ - cdef DPNPFuncType param_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - """ get the FPTR data structure """ - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(fptr_name, param_type, DPNP_FT_NONE) - - x1_obj = x1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (1,) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - 
kernel_data.return_type, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef fptr_custom_std_var_1in_1out_t func = kernel_data.ptr - - # stub for interface support - cdef shape_type_c axis - cdef Py_ssize_t axis_size = 0 - - """ Call FPTR function """ - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - x1.get_data(), - result.get_data(), - x1_shape.data(), - x1.ndim, - axis.data(), - axis_size, - ddof, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - - return result - cpdef dpnp_average(utils.dpnp_descriptor x1): array_sum = dpnp_sum(x1).get_pyobj() @@ -207,53 +144,3 @@ cpdef utils.dpnp_descriptor dpnp_median(utils.dpnp_descriptor array1): c_dpctl.DPCTLEvent_Delete(event_ref) return result - - -cpdef utils.dpnp_descriptor dpnp_nanvar(utils.dpnp_descriptor arr, ddof): - # dpnp_isnan does not support USM array as input in comparison to dpnp.isnan - cdef utils.dpnp_descriptor mask_arr = dpnp.get_dpnp_descriptor(dpnp.isnan(arr.get_pyobj()), - copy_when_nondefault_queue=False) - n = dpnp.count_nonzero(mask_arr.get_pyobj()) - res_size = int(arr.size - n) - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(arr.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_NANVAR_EXT, param1_type, param1_type) - - arr_obj = arr.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = utils._object_to_tuple(res_size) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=arr_obj.sycl_device, - usm_type=arr_obj.usm_type, - sycl_queue=arr_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef fptr_custom_nanvar_t func = kernel_data.ptr - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - arr.get_data(), - mask_arr.get_data(), - result.get_data(), - result.size, - arr.size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return call_fptr_custom_std_var_1in_1out(DPNP_FN_VAR_EXT, result, ddof) - - -cpdef utils.dpnp_descriptor dpnp_std(utils.dpnp_descriptor a, size_t ddof): - return call_fptr_custom_std_var_1in_1out(DPNP_FN_STD_EXT, a, ddof) - - -cpdef utils.dpnp_descriptor dpnp_var(utils.dpnp_descriptor a, size_t ddof): - return call_fptr_custom_std_var_1in_1out(DPNP_FN_VAR_EXT, a, ddof) diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index c1fbbc1d124d..a5b060396ba4 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1167,15 +1167,23 @@ def squeeze(self, axis=None): return dpnp.squeeze(self, axis) - def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """Returns the variance of the array elements, along given axis. - - .. seealso:: - :obj:`dpnp.var` for full documentation, + def std( + self, + axis=None, + dtype=None, + out=None, + ddof=0, + keepdims=False, + *, + where=True, + ): + """ + Returns the standard deviation of the array elements, along given axis. + Refer to :obj:`dpnp.std` for full documentation. 
""" - return dpnp.std(self, axis, dtype, out, ddof, keepdims) + return dpnp.std(self, axis, dtype, out, ddof, keepdims, where=where) @property def strides(self): @@ -1207,10 +1215,7 @@ def sum( """ Returns the sum along a given axis. - .. seealso:: - :obj:`dpnp.sum` for full documentation, - :meth:`dpnp.dparray.sum` - + For full documentation refer to :obj:`dpnp.sum`. """ return dpnp.sum( @@ -1307,23 +1312,22 @@ def transpose(self, *axes): res._array_obj = dpt.permute_dims(self._array_obj, axes) return res - def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + def var( + self, + axis=None, + dtype=None, + out=None, + ddof=0, + keepdims=False, + *, + where=True, + ): """ - Returns the variance of the array elements along given axis. - - Masked entries are ignored, and result elements which are not - finite will be masked. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - :obj:`numpy.ndarray.var` : corresponding function for ndarrays - :obj:`numpy.var` : Equivalent function + Returns the variance of the array elements, along given axis. + Refer to :obj:`dpnp.var` for full documentation. """ - - return dpnp.var(self, axis, dtype, out, ddof, keepdims) + return dpnp.var(self, axis, dtype, out, ddof, keepdims, where=where) # 'view' diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index 247264a79c56..215509c1fc35 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -92,6 +90,8 @@ from dpnp.dpnp_iface_manipulation import __all__ as __all__manipulation from dpnp.dpnp_iface_mathematical import * from dpnp.dpnp_iface_mathematical import __all__ as __all__mathematical +from dpnp.dpnp_iface_nanfunctions import * +from dpnp.dpnp_iface_nanfunctions import __all__ as __all__nanfunctions from dpnp.dpnp_iface_searching import * from dpnp.dpnp_iface_searching import __all__ as __all__searching from dpnp.dpnp_iface_sorting import * @@ -110,6 +110,7 @@ __all__ += __all__logic __all__ += __all__manipulation __all__ += __all__mathematical +__all__ += __all__nanfunctions __all__ += __all__searching __all__ += __all__sorting __all__ += __all__statistics @@ -456,7 +457,7 @@ def get_normalized_queue_device(obj=None, device=None, sycl_queue=None): ) -def get_result_array(a, out=None): +def get_result_array(a, out=None, casting="safe"): """ If `out` is provided, value of `a` array will be copied into the `out` array according to ``safe`` casting rule. @@ -466,11 +467,12 @@ def get_result_array(a, out=None): ---------- a : {dpnp_array} Input array. - out : {dpnp_array, usm_ndarray} If provided, value of `a` array will be copied into it according to ``safe`` casting rule. It should be of the appropriate shape. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Returns ------- @@ -482,21 +484,15 @@ def get_result_array(a, out=None): if out is None: return a else: + dpnp.check_supported_arrays_type(out) if out.shape != a.shape: raise ValueError( f"Output array of shape {a.shape} is needed, got {out.shape}." 
) - elif not isinstance(out, dpnp_array): - if isinstance(out, dpt.usm_ndarray): - out = dpnp_array._create_from_usm_ndarray(out) - else: - raise TypeError( - "Output array must be any of supported type, but got {}".format( - type(out) - ) - ) - - dpnp.copyto(out, a, casting="safe") + elif isinstance(out, dpt.usm_ndarray): + out = dpnp_array._create_from_usm_ndarray(out) + + dpnp.copyto(out, a, casting=casting) return out diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 7ee53bca3775..0913ddb886d3 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index f6ca59e69070..53ae89d2a51e 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -109,10 +107,6 @@ "mod", "modf", "multiply", - "nancumprod", - "nancumsum", - "nanprod", - "nansum", "negative", "positive", "power", @@ -1769,179 +1763,6 @@ def multiply( ) -def nancumprod(x1, **kwargs): - """ - Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one. - - For full documentation refer to :obj:`numpy.nancumprod`. - - Limitations - ----------- - Parameter `x` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - .. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis. - - Examples - -------- - >>> import dpnp as np - >>> a = np.array([1., np.nan]) - >>> result = np.nancumprod(a) - >>> [x for x in result] - [1.0, 1.0] - >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) - >>> result = np.nancumprod(b) - >>> [x for x in result] - [1.0, 2.0, 2.0, 8.0, 8.0, 48.0] - - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - return dpnp_nancumprod(x1_desc).get_pyobj() - - return call_origin(numpy.nancumprod, x1, **kwargs) - - -def nancumsum(x1, **kwargs): - """ - Return the cumulative sum of the elements along a given axis. - - For full documentation refer to :obj:`numpy.nancumsum`. - - Limitations - ----------- - Parameter `x` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - See Also - -------- - :obj:`dpnp.cumsum` : Return the cumulative sum of the elements along a given axis. 
- - Examples - -------- - >>> import dpnp as np - >>> a = np.array([1., np.nan]) - >>> result = np.nancumsum(a) - >>> [x for x in result] - [1.0, 1.0] - >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) - >>> result = np.nancumprod(b) - >>> [x for x in result] - [1.0, 3.0, 3.0, 7.0, 7.0, 13.0] - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - return dpnp_nancumsum(x1_desc).get_pyobj() - - return call_origin(numpy.nancumsum, x1, **kwargs) - - -def nanprod( - a, - axis=None, - dtype=None, - out=None, - keepdims=False, - initial=None, - where=True, -): - """ - Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. - - For full documentation refer to :obj:`numpy.nanprod`. - - Returns - ------- - out : dpnp.ndarray - A new array holding the result is returned unless `out` is specified, in which case it is returned. - - See Also - -------- - :obj:`dpnp.prod` : Returns product across array propagating NaNs. - :obj:`dpnp.isnan` : Test element-wise for NaN and return result as a boolean array. - - Limitations - ----------- - Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `initial`, and `where` are only supported with their default values. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - Examples - -------- - >>> import dpnp as np - >>> np.nanprod(np.array(1)) - array(1) - >>> np.nanprod(np.array([1])) - array(1) - >>> np.nanprod(np.array([1, np.nan])) - array(1.0) - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanprod(a) - array(6.0) - >>> np.nanprod(a, axis=0) - array([3., 2.]) - - """ - - dpnp.check_supported_arrays_type(a) - - if issubclass(a.dtype.type, dpnp.inexact): - mask = dpnp.isnan(a) - a = dpnp.array(a, copy=True) - dpnp.copyto(a, 1, where=mask) - - return dpnp.prod( - a, - axis=axis, - dtype=dtype, - out=out, - keepdims=keepdims, - initial=initial, - where=where, - ) - - -def nansum(x1, **kwargs): - """ - Calculate sum() function treating 'Not a Numbers' (NaN) as zero. - - For full documentation refer to :obj:`numpy.nansum`. - - Limitations - ----------- - Parameter `x1` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - Examples - -------- - >>> import dpnp as np - >>> np.nansum(np.array([1, 2])) - 3 - >>> np.nansum(np.array([[1, 2], [3, 4]])) - 10 - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - result_obj = dpnp_nansum(x1_desc).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - return result - - return call_origin(numpy.nansum, x1, **kwargs) - - def negative( x, /, @@ -2233,11 +2054,11 @@ def prod( ) elif initial is not None: raise NotImplementedError( - "initial keyword argument is only supported by its default value." + "initial keyword argument is only supported with its default value." ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." 
) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -2768,8 +2589,8 @@ def sum( ----------- Parameters `x` is supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `out`, `initial` and `where` are supported with their default values. - Otherwise the function will be executed sequentially on CPU. + Parameters `initial` and `where` are supported with their default values. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. Examples @@ -2790,12 +2611,14 @@ def sum( axis = normalize_axis_tuple(axis, x.ndim, "axis") - if out is not None: - pass - elif initial != 0: - pass + if initial != 0: + raise NotImplementedError( + "initial keyword argument is only supported with its default value." + ) elif where is not True: - pass + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) else: if ( len(x.shape) == 2 @@ -2853,18 +2676,8 @@ def sum( y = dpt.sum( dpnp.get_usm_ndarray(x), axis=axis, dtype=dtype, keepdims=keepdims ) - return dpnp_array._create_from_usm_ndarray(y) - - return call_origin( - numpy.sum, - x, - axis=axis, - dtype=dtype, - out=out, - keepdims=keepdims, - initial=initial, - where=where, - ) + result = dpnp_array._create_from_usm_ndarray(y) + return dpnp.get_result_array(result, out, casting="same_kind") def trapz(y1, x1=None, dx=1.0, axis=-1): diff --git a/dpnp/dpnp_iface_nanfunctions.py b/dpnp/dpnp_iface_nanfunctions.py new file mode 100644 index 000000000000..966a2c9a5781 --- /dev/null +++ b/dpnp/dpnp_iface_nanfunctions.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- +# ***************************************************************************** +# Copyright (c) 2016-2023, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# ***************************************************************************** + +""" +Interface of the nan functions of the DPNP + +Notes +----- +This module is a face or public interface file for the library +it contains: + - Interface functions + - documentation for the functions + - The functions parameters check + +""" + +import numpy + +import dpnp + +from .dpnp_algo import * +from .dpnp_utils import * + +__all__ = [ + "nancumprod", + "nancumsum", + "nanprod", + "nansum", + "nanvar", +] + + +def _replace_nan(a, val): + """ + Replace NaNs in array `a` with `val`. + + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + val : float + NaN values are set to `val` before doing the operation. + + Returns + ------- + out : {dpnp_array} + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return ``None``. + + """ + + dpnp.check_supported_arrays_type(a) + if issubclass(a.dtype.type, dpnp.inexact): + mask = dpnp.isnan(a) + if not dpnp.any(mask): + mask = None + else: + a = dpnp.array(a, copy=True) + dpnp.copyto(a, val, where=mask) + else: + mask = None + + return a, mask + + +def nancumprod(x1, **kwargs): + """ + Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one. + + For full documentation refer to :obj:`numpy.nancumprod`. + + Limitations + ----------- + Parameter `x` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + .. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([1., np.nan]) + >>> result = np.nancumprod(a) + >>> [x for x in result] + [1.0, 1.0] + >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) + >>> result = np.nancumprod(b) + >>> [x for x in result] + [1.0, 2.0, 2.0, 8.0, 8.0, 48.0] + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + return dpnp_nancumprod(x1_desc).get_pyobj() + + return call_origin(numpy.nancumprod, x1, **kwargs) + + +def nancumsum(x1, **kwargs): + """ + Return the cumulative sum of the elements along a given axis. + + For full documentation refer to :obj:`numpy.nancumsum`. + + Limitations + ----------- + Parameter `x` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.cumsum` : Return the cumulative sum of the elements along a given axis. 
+ + Examples + -------- + >>> import dpnp as np + >>> a = np.array([1., np.nan]) + >>> result = np.nancumsum(a) + >>> [x for x in result] + [1.0, 1.0] + >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) + >>> result = np.nancumsum(b) + >>> [x for x in result] + [1.0, 3.0, 3.0, 7.0, 7.0, 13.0] + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + return dpnp_nancumsum(x1_desc).get_pyobj() + + return call_origin(numpy.nancumsum, x1, **kwargs) + + +def nansum(x1, **kwargs): + """ + Calculate sum() function treating 'Not a Numbers' (NaN) as zero. + + For full documentation refer to :obj:`numpy.nansum`. + + Limitations + ----------- + Parameter `x1` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + Examples + -------- + >>> import dpnp as np + >>> np.nansum(np.array([1, 2])) + 3 + >>> np.nansum(np.array([[1, 2], [3, 4]])) + 10 + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + result_obj = dpnp_nansum(x1_desc).get_pyobj() + result = dpnp.convert_single_elem_array_to_scalar(result_obj) + return result + + return call_origin(numpy.nansum, x1, **kwargs) + + +def nanprod( + a, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=None, + where=True, +): + """ + Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. + + For full documentation refer to :obj:`numpy.nanprod`. + + Returns + ------- + out : dpnp.ndarray + A new array holding the result is returned unless `out` is specified, in which case it is returned. + + See Also + -------- + :obj:`dpnp.prod` : Returns product across array propagating NaNs. + :obj:`dpnp.isnan` : Test element-wise for NaN and return result as a boolean array. + + Limitations + ----------- + Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + Parameters `initial`, and `where` are only supported with their default values. + Otherwise ``NotImplementedError`` exception will be raised. + Input array data types are limited by supported DPNP :ref:`Data types`. + + Examples + -------- + >>> import dpnp as np + >>> np.nanprod(np.array(1)) + array(1) + >>> np.nanprod(np.array([1])) + array(1) + >>> np.nanprod(np.array([1, np.nan])) + array(1.0) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + array(6.0) + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + + a, mask = _replace_nan(a, 1) + + return dpnp.prod( + a, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) + + +def nanvar( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + For full documentation refer to :obj:`numpy.nanvar`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + axis or axes along which the variances must be computed. If a tuple + of unique integers is given, the variances are computed over multiple axes. + If ``None``, the variance is computed over the entire array. + Default: `None`. + dtype : dtype, optional + Type to use in computing the variance. 
For arrays of + integer type the default real-valued floating-point data type is used, + for arrays of float types it is the same as the array type. + out : {dpnp_array, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` corresponds to the total + number of elements over which the variance is calculated. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + an array containing the variances. If the variance was computed + over the entire array, a zero-dimensional array is returned. + + If `a` has a real-valued floating-point data type, the returned + array will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + + Limitations + ----------- + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. + Input array data types are limited by real valued data types. + + See Also + -------- + :obj:`dpnp.var` : Compute the variance along the specified axis. + :obj:`dpnp.std` : Compute the standard deviation along the specified axis. + :obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, + ignoring NaNs. + :obj:`dpnp.nanstd` : Compute the standard deviation along + the specified axis, while ignoring NaNs. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + array(1.5555555555555554) + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + + dpnp.check_supported_arrays_type(a) + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + arr, mask = _replace_nan(a, 0) + if mask is None: + return dpnp.var( + arr, + axis=axis, + dtype=dtype, + out=out, + ddof=ddof, + keepdims=keepdims, + where=where, + ) + + if dtype is not None: + dtype = dpnp.dtype(dtype) + if not issubclass(dtype.type, dpnp.inexact): + raise TypeError( + "If input is inexact, then dtype must be inexact." + ) + if out is not None: + dpnp.check_supported_arrays_type(out) + if not dpnp.issubdtype(out.dtype, dpnp.inexact): + raise TypeError( + "If input is inexact, then out must be inexact." + ) + + # Compute mean + var_dtype = a.real.dtype if dtype is None else dtype + cnt = dpnp.sum( + ~mask, axis=axis, dtype=var_dtype, keepdims=True, where=where + ) + avg = dpnp.sum( + arr, axis=axis, dtype=var_dtype, keepdims=True, where=where + ) + avg = dpnp.divide(avg, cnt, out=avg) + + # Compute squared deviation from mean. 
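        # (Editorial note, illustrative only; not part of this patch.) The recipe
        # above mirrors numpy.nanvar: NaNs are zero-filled, the non-NaN items are
        # counted per axis, the zero-filled sums divided by that count give the
        # mean, and the squared deviations are then averaged the same way.
        # A minimal NumPy sketch of the idea (ddof=0, standard NumPy semantics assumed):
        #   >>> import numpy
        #   >>> a = numpy.array([[1.0, numpy.nan], [3.0, 4.0]])
        #   >>> mask = numpy.isnan(a)
        #   >>> arr = numpy.where(mask, 0.0, a)
        #   >>> cnt = (~mask).sum()                   # 3 valid items
        #   >>> avg = arr.sum() / cnt                 # 8/3
        #   >>> ((numpy.where(mask, avg, a) - avg) ** 2) .sum() / cnt
        #   1.5555555555555554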
+ if arr.dtype == avg.dtype: + arr = dpnp.subtract(arr, avg, out=arr) + else: + arr = dpnp.subtract(arr, avg) + dpnp.copyto(arr, 0.0, where=mask) + if dpnp.issubdtype(arr.dtype, dpnp.complexfloating): + sqr = dpnp.multiply(arr, arr.conj(), out=arr).real + else: + sqr = dpnp.multiply(arr, arr, out=arr) + + # Compute variance + var = dpnp.sum( + sqr, + axis=axis, + dtype=var_dtype, + out=out, + keepdims=keepdims, + where=where, + ) + + if var.ndim < cnt.ndim: + cnt = cnt.squeeze(axis) + cnt -= ddof + dpnp.divide(var, cnt, out=var) + + isbad = cnt <= 0 + if dpnp.any(isbad): + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. + dpnp.copyto(var, dpnp.nan, where=isbad) + + return var diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 07cad8e4f30c..5d00154659c2 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -40,6 +40,7 @@ import dpctl.tensor as dpt import numpy +from numpy.core.numeric import normalize_axis_index import dpnp from dpnp.dpnp_algo import * @@ -60,12 +61,52 @@ "median", "min", "ptp", - "nanvar", "std", "var", ] + +def _count_reduce_items(arr, axis, where=True): + """ + Calculates the number of items used in a reduction operation along the specified axis or axes. + + Parameters + ---------- + arr : {dpnp_array, usm_ndarray} + Input array. + axis : int or tuple of ints, optional + axis or axes along which the number of items used in a reduction operation must be counted. + If a tuple of unique integers is given, the items are counted over multiple axes. + If ``None``, the items are counted over the entire array. + Default: `None`. + + Returns + ------- + out : int + The number of items to be used in a reduction operation. + + Limitations + ----------- + Parameters `where` is only supported with its default value. + + """ + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[normalize_axis_index(ax, arr.ndim)] + items = dpnp.intp(items) + else: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + return items + + def amax(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ Return the maximum of an array or maximum along an axis. @@ -342,7 +383,7 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): ----------- Input and output arrays are only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `where`, and `initial` are supported only with their default values. + Parameters `where`, and `initial` are only supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -378,11 +419,11 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): if initial is not None: raise NotImplementedError( - "initial keyword argument is only supported by its default value." + "initial keyword argument is only supported with its default value." ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." 
) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -409,7 +450,7 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): ----------- Parameters `a` is supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameter `where` is supported only with their default values. + Parameter `where` is only supported with its default value. Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -440,7 +481,7 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): if where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." ) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -517,7 +558,7 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): ----------- Input and output arrays are only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `where`, and `initial` are supported only with their default values. + Parameters `where`, and `initial` are only supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -553,11 +594,11 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): if initial is not None: raise NotImplementedError( - "initial keyword argument is only supported by its default value." + "initial keyword argument is only supported with its default value." ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." ) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -611,70 +652,70 @@ def ptp( ) -def nanvar(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ - Compute the variance along the specified axis, while ignoring NaNs. - - For full documentation refer to :obj:`numpy.nanvar`. - - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameter `axis` is supported only with default value ``None``. - Parameter `dtype` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. +def std( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): """ + Compute the standard deviation along the specified axis. - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1.size == 0: - pass - elif axis is not None: - pass - elif dtype is not None: - pass - elif out is not None: - pass - elif keepdims: - pass - else: - result_obj = dpnp_nanvar(x1_desc, ddof).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - - return result - - return call_origin( - numpy.nanvar, - x1, - axis=axis, - dtype=dtype, - out=out, - ddof=ddof, - keepdims=keepdims, - ) + For full documentation refer to :obj:`numpy.std`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the variances must be computed. If a tuple + of unique integers is given, the variances are computed over multiple axes. 
+ If ``None``, the variance is computed over the entire array. + Default: `None`. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default real-valued floating-point data type is used, + for arrays of float types it is the same as the array type. + out : {dpnp_array, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` corresponds to the total + number of elements over which the variance is calculated. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. -def std(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ - Compute the standard deviation along the specified axis. + Returns + ------- + out : dpnp.ndarray + an array containing the standard deviations. If the standard + deviation was computed over the entire array, a zero-dimensional + array is returned. - For full documentation refer to :obj:`numpy.std`. + If `a` has a real-valued floating-point data type, the returned + array will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Size of input array is limited by ``a.size > 0``. - Parameter `axis` is supported only with default value ``None``. - Parameter `dtype` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Note that, for complex numbers, the absolute value is taken before squaring, + so that the result is always real and nonnegative. + See Also -------- + :obj:`dpnp.ndarray.std` : corresponding function for ndarrays. :obj:`dpnp.var` : Compute the variance along the specified axis. :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. 
:obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, @@ -689,50 +730,113 @@ def std(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): >>> import dpnp as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) - 1.118033988749895 + array(1.118033988749895) + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1_desc.size == 0: - pass - elif axis is not None: - pass - elif dtype is not None: - pass - elif out is not None: - pass - elif keepdims: - pass - else: - result_obj = dpnp_std(x1_desc, ddof).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) + dpnp.check_supported_arrays_type(a) - return result + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + if dpnp.issubdtype(a.dtype, dpnp.complexfloating): + result = dpnp.var( + a, + axis=axis, + dtype=None, + out=out, + ddof=ddof, + keepdims=keepdims, + where=where, + ) + dpnp.sqrt(result, out=result) + else: + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.std( + dpt_array, axis=axis, correction=ddof, keepdims=keepdims + ) + ) + result = dpnp.get_result_array(result, out) - return call_origin(numpy.std, x1, axis, dtype, out, ddof, keepdims) + if dtype is not None and out is None: + result = result.astype(dtype, casting="same_kind") + return result -def var(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): +def var( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): """ Compute the variance along the specified axis. For full documentation refer to :obj:`numpy.var`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + axis or axes along which the variances must be computed. If a tuple + of unique integers is given, the variances are computed over multiple axes. + If ``None``, the variance is computed over the entire array. + Default: `None`. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default real-valued floating-point data type is used, + for arrays of float types it is the same as the array type. + out : {dpnp_array, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` corresponds to the total + number of elements over which the variance is calculated. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + an array containing the variances. If the variance was computed + over the entire array, a zero-dimensional array is returned. 
+ + If `a` has a real-valued floating-point data type, the returned + array will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Size of input array is limited by ``a.size > 0``. - Parameter `axis` is supported only with default value ``None``. - Parameter `dtype` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Note that, for complex numbers, the absolute value is taken before squaring, + so that the result is always real and nonnegative. + See Also -------- + :obj:`dpnp.ndarray.var` : corresponding function for ndarrays. :obj:`dpnp.std` : Compute the standard deviation along the specified axis. :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. :obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, @@ -747,26 +851,57 @@ def var(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): >>> import dpnp as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) - 1.25 + array(1.25) + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1_desc.size == 0: - pass - elif axis is not None: - pass - elif dtype is not None: - pass - elif out is not None: - pass - elif keepdims: - pass + dpnp.check_supported_arrays_type(a) + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + if dpnp.issubdtype(a.dtype, dpnp.complexfloating): + # Note that if dtype is not of inexact type then arrmean will not be either. 
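            # (Editorial note, illustrative only; not part of this patch.) For complex
            # input the variance is built from |x - mean|^2: each deviation is multiplied
            # by its conjugate and the real part is kept, so the result is real and
            # nonnegative before it is divided by the item count minus ddof.
            # A minimal NumPy sketch (standard NumPy semantics assumed, ddof=0):
            #   >>> import numpy
            #   >>> a = numpy.array([1 + 1j, 3 - 1j])
            #   >>> d = a - a.mean()                  # [-1.+1.j,  1.-1.j]
            #   >>> (d * d.conj()).real.sum() / a.size
            #   2.0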
+ arrmean = dpnp.mean( + a, axis=axis, dtype=dtype, keepdims=True, where=where + ) + x = dpnp.subtract(a, arrmean) + x = dpnp.multiply(x, x.conj(), out=x).real + result = dpnp.sum( + x, + axis=axis, + dtype=a.real.dtype, + out=out, + keepdims=keepdims, + where=where, + ) + + cnt = _count_reduce_items(a, axis, where) + cnt = numpy.max(cnt - ddof, 0).astype( + result.dtype, casting="same_kind" + ) + if not cnt: + cnt = dpnp.nan + + dpnp.divide(result, cnt, out=result) else: - result_obj = dpnp_var(x1_desc, ddof).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - - return result - - return call_origin(numpy.var, x1, axis, dtype, out, ddof, keepdims) + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.var( + dpt_array, axis=axis, correction=ddof, keepdims=keepdims + ) + ) + result = dpnp.get_result_array(result, out) + + if out is None and dtype is not None: + result = result.astype(dtype, casting="same_kind") + return result diff --git a/tests/conftest.py b/tests/conftest.py index 0213f52e09e3..231af2e34fa9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -26,6 +26,7 @@ import os import sys +import warnings import dpctl import numpy @@ -122,6 +123,20 @@ def suppress_invalid_numpy_warnings(): numpy.seterr(**old_settings) # reset to default +@pytest.fixture +def suppress_dof_numpy_warnings(): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"Degrees of freedom <= 0 for slice") + yield + + +@pytest.fixture +def suppress_mean_empty_slice_numpy_warnings(): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"Mean of empty slice") + yield + + @pytest.fixture def suppress_divide_invalid_numpy_warnings( suppress_divide_numpy_warnings, suppress_invalid_numpy_warnings diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 87a29e2cb0c5..cca2992e6561 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -555,8 +555,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff_types[full] tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike @@ -1026,57 +1024,30 @@ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{ax tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_float16 tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_huge tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_out tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, 
shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 
5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all_nan tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis0 diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index b6f6ceb45913..b8df4b5179d5 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -652,8 +652,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff 
tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff_types[full] tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike @@ -1087,57 +1085,30 @@ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{ax tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_float16 tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_huge tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_out tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar 
tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 
4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all_nan tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis0 diff --git a/tests/skipped_tests_gpu_no_fp64.tbl b/tests/skipped_tests_gpu_no_fp64.tbl index 0e043ee7452b..26e11a700624 100644 --- a/tests/skipped_tests_gpu_no_fp64.tbl +++ b/tests/skipped_tests_gpu_no_fp64.tbl @@ -285,15 +285,3 @@ tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeib tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_weibull_for_inf_a tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull_for_inf_a - - -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_std_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_std_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_var_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_var_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_std_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_std_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_var_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_var_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestProductZeroLength_param_6_{func='std', params=((), None)}::test_external_mean_zero_len -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestProductZeroLength_param_12_{func='var', params=((), None)}::test_external_mean_zero_len diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 7484a66bfb53..51a8de6a3923 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -2082,11 +2082,10 @@ def test_sum_empty(dtype, axis): assert_array_equal(numpy_res, dpnp_res.asnumpy()) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True, no_bool=True)) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_sum_empty_out(dtype): 
a = dpnp.empty((1, 2, 0, 4), dtype=dtype) - out = dpnp.ones(()) + out = dpnp.ones((), dtype=dtype) res = a.sum(out=out) assert_array_equal(out.asnumpy(), res.asnumpy()) assert_array_equal(out.asnumpy(), numpy.array(0, dtype=dtype)) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index 50a9ae5aa36d..3caaaf9c805f 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -8,7 +8,12 @@ import dpnp -from .helper import assert_dtype_allclose, get_all_dtypes +from .helper import ( + assert_dtype_allclose, + get_all_dtypes, + get_float_complex_dtypes, + has_support_aspect64, +) @pytest.mark.parametrize( @@ -61,29 +66,35 @@ def test_max_min_out(func): ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=0) + # output is dpnp array dpnp_res = dpnp.array(numpy.empty_like(np_res)) getattr(dpnp, func)(ia, axis=0, out=dpnp_res) assert_allclose(dpnp_res, np_res) + # output is usm array dpnp_res = dpt.asarray(numpy.empty_like(np_res)) getattr(dpnp, func)(ia, axis=0, out=dpnp_res) assert_allclose(dpnp_res, np_res) + # output is numpy array -> Error dpnp_res = numpy.empty_like(np_res) with pytest.raises(TypeError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # output has incorrect shape -> Error dpnp_res = dpnp.array(numpy.empty((2, 3))) with pytest.raises(ValueError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) @pytest.mark.parametrize("func", ["max", "min"]) -def test_max_min_NotImplemented(func): +def test_max_min_error(func): ia = dpnp.arange(5) - + # where is not supported with pytest.raises(NotImplementedError): getattr(dpnp, func)(ia, where=False) + + # initial is not supported with pytest.raises(NotImplementedError): getattr(dpnp, func)(ia, initial=6) @@ -118,7 +129,10 @@ def test_mean_dtype(self, dtype): result = dpnp.mean(dp_array, dtype=dtype) assert_allclose(expected, result) - @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", + "suppress_mean_empty_slice_numpy_warnings", + ) @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) def test_mean_empty(self, axis, shape): @@ -149,63 +163,308 @@ def test_mean_scalar(self): expected = np_array.mean() assert_allclose(expected, result) - def test_mean_NotImplemented(func): + def test_mean_NotImplemented(self): ia = dpnp.arange(5) with pytest.raises(NotImplementedError): dpnp.mean(ia, where=False) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize( - "array", - [ - [2, 0, 6, 2], - [2, 0, 6, 2, 5, 6, 7, 8], - [], - [2, 1, numpy.nan, 5, 3], - [-1, numpy.nan, 1, numpy.inf], - [3, 6, 0, 1], - [3, 6, 0, 1, 8], - [3, 2, 9, 6, numpy.nan], - [numpy.nan, numpy.nan, numpy.inf, numpy.nan], - [[2, 0], [6, 2]], - [[2, 0, 6, 2], [5, 6, 7, 8]], - [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], - [[-1, numpy.nan], [1, numpy.inf]], - [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]], - ], - ids=[ - "[2, 0, 6, 2]", - "[2, 0, 6, 2, 5, 6, 7, 8]", - "[]", - "[2, 1, np.nan, 5, 3]", - "[-1, np.nan, 1, np.inf]", - "[3, 6, 0, 1]", - "[3, 6, 0, 1, 8]", - "[3, 2, 9, 6, np.nan]", - "[np.nan, np.nan, np.inf, np.nan]", - "[[2, 0], [6, 2]]", - "[[2, 0, 6, 2], [5, 6, 7, 8]]", - "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]", - "[[-1, np.nan], [1, np.inf]]", - "[[np.nan, np.nan], [np.inf, np.nan]]", - ], -) -@pytest.mark.parametrize( - "dtype", get_all_dtypes(no_none=True, no_bool=True, no_complex=True) -) -def test_nanvar(array, dtype): - dtype = dpnp.default_float_type() - a = numpy.array(array, 
dtype=dtype) - ia = dpnp.array(a) - for ddof in range(a.ndim): - expected = numpy.nanvar(a, ddof=ddof) - result = dpnp.nanvar(ia, ddof=ddof) - assert_allclose(expected, result, rtol=1e-06) - - expected = numpy.nanvar(a, axis=None, ddof=0) - result = dpnp.nanvar(ia, axis=None, ddof=0) - assert_allclose(expected, result, rtol=1e-06) +class TestVar: + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2]) + def test_var(self, dtype, axis, keepdims, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.var(np_array, axis=axis, keepdims=keepdims, ddof=ddof) + result = dpnp.var(dp_array, axis=axis, keepdims=keepdims, ddof=ddof) + + if axis == 0 and ddof == 2: + assert dpnp.all(dpnp.isnan(result)) + else: + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("ddof", [0, 1]) + def test_var_out(self, dtype, axis, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.var(np_array, axis=axis, ddof=ddof) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(dp_array.device) + result = dpnp.empty(expected.shape, dtype=res_dtype) + dpnp.var(dp_array, axis=axis, out=result, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) + def test_var_empty(self, axis, shape): + dp_array = dpnp.empty(shape, dtype=dpnp.int64) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.var(dp_array, axis=axis) + expected = numpy.var(np_array, axis=axis) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_var_strided(self, dtype): + dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.var(dp_array[::-1]) + expected = numpy.var(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.var(dp_array[::2]) + expected = numpy.var(np_array[::2]) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_var_dtype(self, dt_in, dt_out): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.var(np_array, dtype=dt_out) + result = dpnp.var(dp_array, dtype=dt_out) + assert expected.dtype == result.dtype + assert_allclose(result, expected, rtol=1e-06) + + def test_var_scalar(self): + dp_array = dpnp.array(5) + np_array = dpnp.asnumpy(dp_array) + + result = dp_array.var() + expected = np_array.var() + assert_allclose(expected, result) + + def test_var_error(self): + ia = dpnp.arange(5) + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.var(ia, 
where=False) + + # ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.var(ia, ddof="1") + + +class TestStd: + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2]) + def test_std(self, dtype, axis, keepdims, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.std(np_array, axis=axis, keepdims=keepdims, ddof=ddof) + result = dpnp.std(dp_array, axis=axis, keepdims=keepdims, ddof=ddof) + if axis == 0 and ddof == 2: + assert dpnp.all(dpnp.isnan(result)) + else: + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("ddof", [0, 1]) + def test_std_out(self, dtype, axis, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.std(np_array, axis=axis, ddof=ddof) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(dp_array.device) + result = dpnp.empty(expected.shape, dtype=res_dtype) + dpnp.std(dp_array, axis=axis, out=result, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) + def test_std_empty(self, axis, shape): + dp_array = dpnp.empty(shape, dtype=dpnp.int64) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.std(dp_array, axis=axis) + expected = numpy.std(np_array, axis=axis) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_std_strided(self, dtype): + dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.std(dp_array[::-1]) + expected = numpy.std(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.std(dp_array[::2]) + expected = numpy.std(np_array[::2]) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_std_dtype(self, dt_in, dt_out): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.std(np_array, dtype=dt_out) + result = dpnp.std(dp_array, dtype=dt_out) + assert expected.dtype == result.dtype + assert_allclose(result, expected, rtol=1e-6) + + def test_std_scalar(self): + dp_array = dpnp.array(5) + np_array = dpnp.asnumpy(dp_array) + + result = dp_array.std() + expected = np_array.std() + assert_dtype_allclose(result, expected) + + def test_std_error(self): + ia = dpnp.arange(5) + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.std(ia, where=False) + + # ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.std(ia, ddof="1") + + +class TestNanVar: + @pytest.mark.parametrize( + "array", + [ + [2, 0, 6, 2], + [2, 0, 6, 2, 5, 6, 7, 8], 
+ [], + [2, 1, numpy.nan, 5, 3], + [-1, numpy.nan, 1, numpy.inf], + [3, 6, 0, 1], + [3, 6, 0, 1, 8], + [3, 2, 9, 6, numpy.nan], + [numpy.nan, numpy.nan, numpy.inf, numpy.nan], + [[2, 0], [6, 2]], + [[2, 0, 6, 2], [5, 6, 7, 8]], + [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], + [[-1, numpy.nan], [1, numpy.inf]], + [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]], + ], + ids=[ + "[2, 0, 6, 2]", + "[2, 0, 6, 2, 5, 6, 7, 8]", + "[]", + "[2, 1, np.nan, 5, 3]", + "[-1, np.nan, 1, np.inf]", + "[3, 6, 0, 1]", + "[3, 6, 0, 1, 8]", + "[3, 2, 9, 6, np.nan]", + "[np.nan, np.nan, np.inf, np.nan]", + "[[2, 0], [6, 2]]", + "[[2, 0, 6, 2], [5, 6, 7, 8]]", + "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]", + "[[-1, np.nan], [1, np.inf]]", + "[[np.nan, np.nan], [np.inf, np.nan]]", + ], + ) + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_none=True, no_bool=True) + ) + def test_nanvar(self, array, dtype): + try: + a = numpy.array(array, dtype=dtype) + except: + pytest.skip("floating datat type is needed to store NaN") + ia = dpnp.array(a) + for ddof in range(a.ndim): + expected = numpy.nanvar(a, ddof=ddof) + result = dpnp.nanvar(ia, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1), (1, 2)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2, 3]) + def test_nanvar_out(self, dtype, axis, keepdims, ddof): + a = numpy.arange(4 * 3 * 5, dtype=dtype) + a[::2] = numpy.nan + a = a.reshape(4, 3, 5) + ia = dpnp.array(a) + + expected = numpy.nanvar(a, axis=axis, ddof=ddof, keepdims=keepdims) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(ia.device) + result = dpnp.empty(expected.shape, dtype=res_dtype) + dpnp.nanvar(ia, out=result, axis=axis, ddof=ddof, keepdims=keepdims) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_float_complex_dtypes()) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_nanvar_dtype(self, dt_in, dt_out): + a = numpy.arange(4 * 3 * 5, dtype=dt_in) + a[::2] = numpy.nan + a = a.reshape(4, 3, 5) + ia = dpnp.array(a) + + expected = numpy.nanvar(a, dtype=dt_out) + result = dpnp.nanvar(ia, dtype=dt_out) + assert_dtype_allclose(result, expected) + + def test_nanvar_error(self): + ia = dpnp.arange(5, dtype=dpnp.float32) + ia[0] = dpnp.nan + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.nanvar(ia, where=False) + + # dtype should be floating + with pytest.raises(TypeError): + dpnp.nanvar(ia, dtype=dpnp.int32) + + # out dtype should be inexact + res = dpnp.empty((1,), dtype=dpnp.int32) + with pytest.raises(TypeError): + dpnp.nanvar(ia, out=res) + + # ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.nanvar(ia, ddof="1") @pytest.mark.usefixtures("allow_fall_back_on_numpy") diff --git a/tests/test_sum.py b/tests/test_sum.py index 4104b33a6248..25c294d051e7 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -7,6 +7,7 @@ import dpnp from tests.helper import ( assert_dtype_allclose, + get_all_dtypes, get_float_dtypes, has_support_aspect64, ) @@ -65,3 +66,24 @@ def test_sum_axis(): else: expected = numpy.sum(a, axis=1) assert_array_equal(expected, 
result) + + +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) +@pytest.mark.parametrize("axis", [0, 1, (0, 1)]) +def test_sum_out(dtype, axis): + a = dpnp.arange(2 * 4, dtype=dtype).reshape(2, 4) + a_np = dpnp.asnumpy(a) + + expected = numpy.sum(a_np, axis=axis) + res = dpnp.empty(expected.shape, dtype=dtype) + a.sum(axis=axis, out=res) + assert_array_equal(expected, res.asnumpy()) + + +def test_sum_NotImplemented(): + ia = dpnp.arange(5) + with pytest.raises(NotImplementedError): + dpnp.sum(ia, where=False) + + with pytest.raises(NotImplementedError): + dpnp.sum(ia, initial=1) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 19104d7bd434..b1175703b017 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -373,6 +373,7 @@ def test_meshgrid(device_x, device_y): pytest.param("nancumsum", [1.0, dpnp.nan]), pytest.param("nanprod", [1.0, dpnp.nan]), pytest.param("nansum", [1.0, dpnp.nan]), + pytest.param("nanvar", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), @@ -387,6 +388,7 @@ def test_meshgrid(device_x, device_y): ), pytest.param("sinh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("sqrt", [1.0, 3.0, 9.0]), + pytest.param("std", [1.0, 2.0, 4.0, 7.0]), pytest.param("sum", [1.0, 2.0]), pytest.param( "tan", [-dpnp.pi / 2, -dpnp.pi / 4, 0.0, dpnp.pi / 4, dpnp.pi / 2] @@ -394,6 +396,7 @@ def test_meshgrid(device_x, device_y): pytest.param("tanh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("trapz", [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]), pytest.param("trunc", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("var", [1.0, 2.0, 4.0, 7.0]), ], ) @pytest.mark.parametrize( diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index fa2931cc5054..4da04c2d6751 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -395,6 +395,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), pytest.param("nanprod", [1.0, 2.0, dp.nan]), + pytest.param("nanvar", [1.0, 2.0, 4.0, dp.nan]), pytest.param("max", [1.0, 2.0, 4.0, 7.0]), pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), @@ -414,11 +415,13 @@ def test_meshgrid(usm_type_x, usm_type_y): ), pytest.param("sinh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("sqrt", [1.0, 3.0, 9.0]), + pytest.param("std", [1.0, 2.0, 4.0, 7.0]), pytest.param( "tan", [-dp.pi / 2, -dp.pi / 4, 0.0, dp.pi / 4, dp.pi / 2] ), pytest.param("tanh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("trunc", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("var", [1.0, 2.0, 4.0, 7.0]), ], ) @pytest.mark.parametrize("usm_type", list_of_usm_types, ids=list_of_usm_types) diff --git a/tests/third_party/cupy/math_tests/test_sumprod.py b/tests/third_party/cupy/math_tests/test_sumprod.py index 0728382a5b43..fc94b329665f 100644 --- a/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/tests/third_party/cupy/math_tests/test_sumprod.py @@ -8,7 +8,6 @@ from tests.third_party.cupy import testing -@testing.gpu class TestSumprod(unittest.TestCase): def tearDown(self): # Free huge memory for slow test @@ -19,75 +18,52 @@ def tearDown(self): # Note: numpy.sum() always upcast integers to (u)int64 and float32 to # float64 for dtype=None. 
`np.sum` does that too for integers, but not for # float32, so we need to special-case it for these tests + def _get_dtype_kwargs(self, xp, dtype): + if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): + return {"dtype": numpy.float64} + return {} + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_all(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_all_keepdims(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype, keepdims=True) - else: - return a.sum(keepdims=True) + return a.sum(**self._get_dtype_kwargs(xp, dtype), keepdims=True) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_external_sum_all(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return xp.sum(a, dtype=dtype) - else: - return xp.sum(a) + return xp.sum(a, **self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-06) def test_sum_all2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=False) def test_sum_all_transposed(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-06) def test_sum_all_transposed2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype, axis=1) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.slow @testing.numpy_cupy_allclose() @@ -99,11 +75,7 @@ def test_sum_axis_huge(self, xp): @testing.numpy_cupy_allclose() def test_external_sum_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return xp.sum(a, axis=1, dtype=dtype) - else: - return xp.sum(a, axis=1) + return xp.sum(a, **self._get_dtype_kwargs(xp, dtype), axis=1) # float16 is omitted, since NumPy's sum on float16 arrays has more error # than CuPy's. 
@@ -111,71 +83,49 @@ def test_external_sum_axis(self, xp, dtype): @testing.numpy_cupy_allclose() def test_sum_axis2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=1, dtype=dtype) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_sum_axis_transposed(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=1, dtype=dtype) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_sum_axis_transposed2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=1, dtype=dtype) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_axes(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(1, 3), dtype=dtype) - else: - return a.sum(axis=(1, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(1, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-4) def test_sum_axes2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(1, 3), dtype=dtype) - else: - return a.sum(axis=(1, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(1, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-6) def test_sum_axes3(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(0, 2, 3), dtype=dtype) - else: - return a.sum(axis=(0, 2, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-6) def test_sum_axes4(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(0, 2, 3), dtype=dtype) - else: - return a.sum(axis=(0, 2, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose() + def test_sum_empty_axis(self, xp, dtype): + a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=()) @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"]) @testing.numpy_cupy_allclose() @@ -193,11 +143,9 @@ def test_sum_keepdims_and_dtype(self, xp, src_dtype, dst_dtype): @testing.numpy_cupy_allclose() def test_sum_keepdims_multiple_axes(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(1, 2), keepdims=True, dtype=dtype) - else: - return a.sum(axis=(1, 
2), keepdims=True) + return a.sum( + **self._get_dtype_kwargs(xp, dtype), axis=(1, 2), keepdims=True + ) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() diff --git a/tests/third_party/cupy/statistics_tests/test_meanvar.py b/tests/third_party/cupy/statistics_tests/test_meanvar.py index 738057a99f49..de2eb22604fb 100644 --- a/tests/third_party/cupy/statistics_tests/test_meanvar.py +++ b/tests/third_party/cupy/statistics_tests/test_meanvar.py @@ -4,7 +4,7 @@ import pytest import dpnp as cupy -from tests.helper import has_support_aspect64 +from tests.helper import has_support_aspect16, has_support_aspect64 from tests.third_party.cupy import testing ignore_runtime_warnings = pytest.mark.filterwarnings( @@ -152,7 +152,6 @@ def test_returned(self, dtype): self.check_returned(a, axis=1, weights=w) -@testing.gpu class TestMeanVar(unittest.TestCase): @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) @@ -197,51 +196,47 @@ def test_mean_all_complex_dtype(self, xp, dtype): return xp.mean(a, dtype=numpy.complex64) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.var() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.var(a) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.var(ddof=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.var(a, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.var(axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.var(a, axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_axis_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.var(axis=1, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_axis_ddof(self, xp, dtype): @@ -249,51 +244,47 @@ def test_external_var_axis_ddof(self, xp, dtype): return xp.var(a, axis=1, ddof=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.std() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.std(a) @testing.for_all_dtypes() - 
@testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.std(ddof=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.std(a, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.std(axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.std(a, axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_axis_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.std(axis=1, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_axis_ddof(self, xp, dtype): @@ -382,11 +373,10 @@ def test_nanmean_all_nan(self, xp): } ) ) -@testing.gpu class TestNanVarStd(unittest.TestCase): - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) - @testing.numpy_cupy_allclose(rtol=1e-6) + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype) if a.dtype.kind not in "biu": @@ -395,8 +385,7 @@ def test_nanvar(self, xp, dtype): a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims ) - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype) @@ -407,11 +396,10 @@ def test_nanstd(self, xp, dtype): ) -@testing.gpu class TestNanVarStdAdditional(unittest.TestCase): - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) - @testing.numpy_cupy_allclose(rtol=1e-6) + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) z = xp.zeros((20, 30)) @@ -424,8 +412,8 @@ def test_nanvar_out(self, xp, dtype): return z @testing.slow - @testing.for_all_dtypes(no_float16=True, no_complex=True) - @testing.numpy_cupy_allclose(rtol=1e-6) + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) @@ -434,14 +422,16 @@ def test_nanvar_huge(self, xp, dtype): return xp.nanvar(a, axis=1) - @testing.numpy_cupy_allclose(rtol=1e-4) + @pytest.mark.skipif( + not has_support_aspect16(), reason="No fp16 support by device" + ) + @testing.numpy_cupy_allclose(rtol=1e-3) def 
test_nanvar_float16(self, xp): a = testing.shaped_arange((4, 5), xp, numpy.float16) a[0][0] = xp.nan return xp.nanvar(a, axis=0) - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) @@ -455,7 +445,7 @@ def test_nanstd_out(self, xp, dtype): return z @testing.slow - @testing.for_all_dtypes(no_float16=True, no_complex=True) + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) @@ -487,8 +477,11 @@ def test_nanstd_float16(self, xp): } ) ) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@testing.gpu +@pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", + "suppress_dof_numpy_warnings", + "suppress_mean_empty_slice_numpy_warnings", +) class TestProductZeroLength(unittest.TestCase): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) From d9c1ca1bcc0cd31fbb116d62c6e2e79a5396d0e4 Mon Sep 17 00:00:00 2001 From: Natalia Polina Date: Sat, 16 Dec 2023 07:01:48 -0800 Subject: [PATCH 34/38] Implement of dpnp.clip() (#1645) * Implement of dpnp.clip() * address comments * Increased tests coverage --------- Co-authored-by: Anton Volkov Co-authored-by: Anton <100830759+antonwolfy@users.noreply.github.com> --- dpnp/dpnp_array.py | 11 ++- dpnp/dpnp_iface_mathematical.py | 73 +++++++++++++++ tests/skipped_tests.tbl | 10 -- tests/skipped_tests_gpu.tbl | 11 +-- tests/test_dparray.py | 9 ++ tests/test_mathematical.py | 93 +++++++++++++++++++ tests/test_sycl_queue.py | 11 +++ tests/test_usm_type.py | 7 ++ .../third_party/cupy/math_tests/test_misc.py | 2 +- 9 files changed, 205 insertions(+), 22 deletions(-) diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index a5b060396ba4..314491fcff51 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -628,7 +628,16 @@ def choose(input, choices, out=None, mode="raise"): return dpnp.choose(input, choices, out, mode) - # 'clip', + def clip(self, min=None, max=None, out=None, **kwargs): + """ + Clip (limit) the values in an array. + + Refer to :obj:`dpnp.clip` for full documentation. + + """ + + return dpnp.clip(self, min, max, out=out, **kwargs) + # 'compress', def conj(self): diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 53ae89d2a51e..35465a046ccf 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -84,6 +84,7 @@ "add", "around", "ceil", + "clip", "conj", "conjugate", "convolve", @@ -381,6 +382,78 @@ def ceil( ) +def clip(a, a_min, a_max, *, out=None, order="K", **kwargs): + """ + Clip (limit) the values in an array. + + For full documentation refer to :obj:`numpy.clip`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Array containing elements to clip. + a_min, a_max : {dpnp_array, usm_ndarray, None} + Minimum and maximum value. If ``None``, clipping is not performed on the corresponding edge. + Only one of `a_min` and `a_max` may be ``None``. Both are broadcast against `a`. + out : {dpnp_array, usm_ndarray}, optional + The results will be placed in this array. It may be the input array for in-place clipping. + `out` must be of the right shape to hold the output. Its type is preserved. 
+ order : {"C", "F", "A", "K", None}, optional + Memory layout of the newly output array, if parameter `out` is `None`. + If `order` is ``None``, the default value "K" will be used. + + Returns + ------- + out : dpnp_array + An array with the elements of `a`, but where values < `a_min` are replaced with `a_min`, + and those > `a_max` with `a_max`. + + Limitations + ----------- + Keyword argument `kwargs` is currently unsupported. + Otherwise ``NotImplementedError`` exception will be raised. + + Examples + -------- + >>> import dpnp as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, 8, 1) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> min = np.array([3, 4, 1, 1, 1, 4, 4, 4, 4, 4]) + >>> np.clip(a, min, 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + + if kwargs: + raise NotImplementedError(f"kwargs={kwargs} is currently not supported") + + if order is None: + order = "K" + + usm_arr = dpnp.get_usm_ndarray(a) + usm_min = None if a_min is None else dpnp.get_usm_ndarray_or_scalar(a_min) + usm_max = None if a_max is None else dpnp.get_usm_ndarray_or_scalar(a_max) + + usm_out = None if out is None else dpnp.get_usm_ndarray(out) + usm_res = dpt.clip(usm_arr, usm_min, usm_max, out=usm_out, order=order) + if out is not None and isinstance(out, dpnp_array): + return out + return dpnp_array._create_from_usm_ndarray(usm_res) + + def conjugate( x, /, diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index cca2992e6561..c3d1fd8b0779 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -412,8 +412,6 @@ tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_ldexp tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_nextafter_combination tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_nextafter_float -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip_min_max_none -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip4 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fmax_nan tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fmin_nan tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_nan_to_num @@ -435,14 +433,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolveInvalid_param_1_{mod tests/third_party/cupy/math_tests/test_misc.py::TestConvolveInvalid_param_2_{mode='full'}::test_convolve_empty tests/third_party/cupy/math_tests/test_misc.py::TestConvolveInvalid_param_2_{mode='full'}::test_convolve_ndim -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip1 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip3 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip_min_none -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip_max_none -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip1 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip2 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip3 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip2 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs_negative 
tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_nan_to_num_scalar_nan diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index b8df4b5179d5..86805a4560b0 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -509,8 +509,6 @@ tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_ldexp tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_nextafter_combination tests/third_party/cupy/math_tests/test_floating.py::TestFloating::test_nextafter_float -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip_min_max_none -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip4 tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fmax_nan tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fmin_nan tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_nan_to_num @@ -532,14 +530,7 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolveInvalid_param_1_{mod tests/third_party/cupy/math_tests/test_misc.py::TestConvolveInvalid_param_2_{mode='full'}::test_convolve_empty tests/third_party/cupy/math_tests/test_misc.py::TestConvolveInvalid_param_2_{mode='full'}::test_convolve_ndim -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip1 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip3 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip_min_none -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip_max_none -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip1 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip2 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_external_clip3 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_clip2 + tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_fabs_negative tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_nan_to_num_scalar_nan diff --git a/tests/test_dparray.py b/tests/test_dparray.py index 3c57d44bf912..26446c855ac1 100644 --- a/tests/test_dparray.py +++ b/tests/test_dparray.py @@ -246,3 +246,12 @@ def test_repeat(): numpy_array = numpy.arange(4).repeat(3) dpnp_array = dpnp.arange(4).repeat(3) assert_array_equal(numpy_array, dpnp_array) + + +def test_clip(): + numpy_array = numpy.arange(10) + dpnp_array = dpnp.arange(10) + result = dpnp.clip(dpnp_array, 3, 7) + expected = numpy.clip(numpy_array, 3, 7) + + assert_array_equal(expected, result) diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 51a8de6a3923..3cfbd88b4dc1 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -13,6 +13,7 @@ ) import dpnp +from dpnp.dpnp_array import dpnp_array from .helper import ( assert_dtype_allclose, @@ -27,6 +28,98 @@ ) +class TestClip: + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) + @pytest.mark.parametrize("order", ["C", "F", "A", "K", None]) + def test_clip(self, dtype, order): + dp_a = dpnp.asarray([[1, 2, 8], [1, 6, 4], [9, 5, 1]], dtype=dtype) + np_a = dpnp.asnumpy(dp_a) + + result = dpnp.clip(dp_a, 2, 6, order=order) + expected = numpy.clip(np_a, 2, 6, order=order) + assert_allclose(expected, result) + assert expected.flags.c_contiguous == result.flags.c_contiguous + assert expected.flags.f_contiguous == result.flags.f_contiguous + + @pytest.mark.parametrize( + "dtype", 
get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) + def test_clip_arrays(self, dtype): + dp_a = dpnp.asarray([1, 2, 8, 1, 6, 4, 1], dtype=dtype) + np_a = dpnp.asnumpy(dp_a) + + min_v = dpnp.asarray(2, dtype=dtype) + max_v = dpnp.asarray(6, dtype=dtype) + + result = dpnp.clip(dp_a, min_v, max_v) + expected = numpy.clip(np_a, min_v.asnumpy(), max_v.asnumpy()) + assert_allclose(expected, result) + + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) + ) + @pytest.mark.parametrize("in_dp", [dpnp, dpt]) + @pytest.mark.parametrize("out_dp", [dpnp, dpt]) + def test_clip_out(self, dtype, in_dp, out_dp): + np_a = numpy.array([[1, 2, 8], [1, 6, 4], [9, 5, 1]], dtype=dtype) + dp_a = in_dp.asarray(np_a) + + dp_out = out_dp.ones(dp_a.shape, dtype=dtype) + np_out = numpy.ones(np_a.shape, dtype=dtype) + + result = dpnp.clip(dp_a, 2, 6, out=dp_out) + expected = numpy.clip(np_a, 2, 6, out=np_out) + assert_allclose(expected, result) + assert_allclose(np_out, dp_out) + assert isinstance(result, dpnp_array) + + def test_input_nan(self): + np_a = numpy.array([-2.0, numpy.nan, 0.5, 3.0, 0.25, numpy.nan]) + dp_a = dpnp.array(np_a) + + result = dpnp.clip(dp_a, -1, 1) + expected = numpy.clip(np_a, -1, 1) + assert_array_equal(result, expected) + + # TODO: unmute the test once dpctl resolves the issue + @pytest.mark.skip(reason="dpctl-1489 issue") + @pytest.mark.parametrize( + "kwargs", + [ + {"min": numpy.nan}, + {"max": numpy.nan}, + {"min": numpy.nan, "max": numpy.nan}, + {"min": -2, "max": numpy.nan}, + {"min": numpy.nan, "max": 10}, + ], + ) + def test_nan_edges(self, kwargs): + np_a = numpy.arange(7) + dp_a = dpnp.asarray(np_a) + + result = dp_a.clip(**kwargs) + expected = np_a.clip(**kwargs) + assert_allclose(expected, result) + + @pytest.mark.parametrize( + "kwargs", + [ + {"casting": "same_kind"}, + {"dtype": "i8"}, + {"subok": True}, + {"where": True}, + ], + ) + def test_not_implemented_kwargs(self, kwargs): + a = dpnp.arange(8, dtype="i4") + + numpy.clip(a.asnumpy(), 1, 5, **kwargs) + with pytest.raises(NotImplementedError): + dpnp.clip(a, 1, 5, **kwargs) + + class TestDiff: @pytest.mark.parametrize("n", list(range(0, 3))) @pytest.mark.parametrize("dt", get_integer_dtypes()) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index b1175703b017..26d047c103d4 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -1372,3 +1372,14 @@ def test_solve(device): assert_sycl_queue_equal(result_queue, dpnp_x.sycl_queue) assert_sycl_queue_equal(result_queue, dpnp_y.sycl_queue) + + +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_clip(device): + x = dpnp.arange(10, device=device) + y = dpnp.clip(x, 3, 7) + assert_sycl_queue_equal(x.sycl_queue, y.sycl_queue) diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 4da04c2d6751..b889bab6010c 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -527,3 +527,10 @@ def test_solve(matrix, vector, usm_type_matrix, usm_type_vector): assert z.usm_type == du.get_coerced_usm_type( [usm_type_matrix, usm_type_vector] ) + + +@pytest.mark.parametrize("usm_type", list_of_usm_types, ids=list_of_usm_types) +def test_clip(usm_type): + x = dp.arange(10, usm_type=usm_type) + y = dp.clip(x, 2, 7) + assert x.usm_type == y.usm_type diff --git a/tests/third_party/cupy/math_tests/test_misc.py b/tests/third_party/cupy/math_tests/test_misc.py index c05432e36427..241457fbad90 100644 --- 
a/tests/third_party/cupy/math_tests/test_misc.py +++ b/tests/third_party/cupy/math_tests/test_misc.py @@ -519,7 +519,7 @@ def test_convolve_non_contiguous(self, xp, dtype, mode): return xp.convolve(a[::200], b[10::70], mode=mode) @testing.for_all_dtypes(no_float16=True) - @testing.numpy_cupy_allclose(rtol=1e-4) + @testing.numpy_cupy_allclose(rtol=5e-4) def test_convolve_large_non_contiguous(self, xp, dtype, mode): a = testing.shaped_arange((10000,), xp, dtype) b = testing.shaped_arange((100,), xp, dtype) From 680d9038b2d7d478d6a13b311688ae6db1b1aaa0 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Wed, 20 Dec 2023 11:14:28 -0600 Subject: [PATCH 35/38] implement `dpnp.nanargmax`, `dpnp.nanargmin`, `dpnp.nanmax`, and `dpnp.nanmin` (#1646) * implement nanargmax, nanargmin, nanmax, nanmin * address comments * suppress overflow warning --- dpnp/dpnp_iface_mathematical.py | 12 +- dpnp/dpnp_iface_nanfunctions.py | 364 +++++++++++++++++- dpnp/dpnp_iface_searching.py | 46 ++- dpnp/dpnp_iface_statistics.py | 52 ++- tests/conftest.py | 7 + tests/skipped_tests.tbl | 90 ++--- tests/skipped_tests_gpu.tbl | 90 ++--- tests/test_mathematical.py | 51 +-- tests/test_search.py | 56 ++- tests/test_statistics.py | 66 +++- tests/test_sycl_queue.py | 4 + tests/test_usm_type.py | 4 + .../cupy/sorting_tests/test_search.py | 46 ++- .../cupy/statistics_tests/test_order.py | 92 ++--- 14 files changed, 704 insertions(+), 276 deletions(-) diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 35465a046ccf..53e4e619091e 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -133,8 +133,8 @@ def _append_to_diff_array(a, axis, combined, values): Scalar value (including case with 0d array) is expanded to an array with length=1 in the direction of axis and the shape of the input array `a` - in along all other axes. - Note, if `values` is a scalar. then it is converted to 0d array allocating + along all other axes. + Note, if `values` is a scalar, then it is converted to 0d array allocating on the same SYCL queue as the input array `a` and with the same USM type. """ @@ -1132,7 +1132,9 @@ def fmax(x1, x2, /, out=None, *, where=True, dtype=None, subok=True, **kwargs): See Also -------- :obj:`dpnp.maximum` : Element-wise maximum of array elements, propagates NaNs. - :obj:`dpnp.fmin` : Element-wise minimum of array elements, ignore NaNs. + :obj:`dpnp.fmin` : Element-wise minimum of array elements, ignores NaNs. + :obj:`dpnp.max` : The maximum value of an array along a given axis, propagates NaNs.. + :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignores NaNs. :obj:`dpnp.minimum` : Element-wise minimum of array elements, propagates NaNs. :obj:`dpnp.fmod` : Calculate the element-wise remainder of division. @@ -1237,7 +1239,9 @@ def fmin(x1, x2, /, out=None, *, where=True, dtype=None, subok=True, **kwargs): See Also -------- :obj:`dpnp.minimum` : Element-wise minimum of array elements, propagates NaNs. - :obj:`dpnp.fmax` : Element-wise maximum of array elements, ignore NaNs. + :obj:`dpnp.fmax` : Element-wise maximum of array elements, ignores NaNs. + :obj:`dpnp.min` : The minimum value of an array along a given axis, propagates NaNs. + :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignores NaNs. :obj:`dpnp.maximum` : Element-wise maximum of array elements, propagates NaNs. :obj:`dpnp.fmod` : Calculate the element-wise remainder of division. 
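The See Also updates above draw a distinction between element-wise NaN handling (maximum/minimum propagate NaNs, fmax/fmin ignore them) and the reduction counterparts (max/min propagate NaNs, while the nanmax/nanmin added by this patch ignore them). A minimal illustrative sketch of that distinction follows; it is not part of the patch hunks, and it assumes a default SYCL device is available:

import dpnp as np

a = np.array([1.0, np.nan, 3.0])
b = np.array([2.0, 2.0, np.nan])

np.maximum(a, b)   # element-wise, propagates NaNs -> array([ 2., nan, nan])
np.fmax(a, b)      # element-wise, ignores NaNs    -> array([2., 2., 3.])
np.max(a)          # reduction, propagates NaNs    -> array(nan)
np.nanmax(a)       # reduction, ignores NaNs       -> array(3.), added by this patch

# For an all-NaN slice, the implementations in the diff below warn and return
# NaN for nanmax/nanmin, but raise for nanargmax/nanargmin:
np.nanmax(np.array([np.nan, np.nan]))     # RuntimeWarning("All-NaN slice encountered"), returns array(nan)
np.nanargmax(np.array([np.nan, np.nan]))  # raises ValueError("All-NaN slice encountered")
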
diff --git a/dpnp/dpnp_iface_nanfunctions.py b/dpnp/dpnp_iface_nanfunctions.py index 966a2c9a5781..a16583fa0c9c 100644 --- a/dpnp/dpnp_iface_nanfunctions.py +++ b/dpnp/dpnp_iface_nanfunctions.py @@ -37,6 +37,8 @@ """ +import warnings + import numpy import dpnp @@ -45,8 +47,12 @@ from .dpnp_utils import * __all__ = [ + "nanargmax", + "nanargmin", "nancumprod", "nancumsum", + "nanmax", + "nanmin", "nanprod", "nansum", "nanvar", @@ -94,6 +100,142 @@ def _replace_nan(a, val): return a, mask +def nanargmax(a, axis=None, out=None, *, keepdims=False): + """ + Returns the indices of the maximum values along an axis ignoring NaNs. + + For full documentation refer to :obj:`numpy.nanargmax`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int, optional + Axis along which to search. If ``None``, the function must return + the index of the maximum value of the flattened array. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + If `axis` is ``None``, a zero-dimensional array containing the index of + the first occurrence of the maximum value ignoring NaNs; otherwise, a non-zero-dimensional + array containing the indices of the minimum values ignoring NaNs. The returned array + must have the default array index data type. + For all-NaN slices ``ValueError`` is raised. + Warning: the results cannot be trusted if a slice contains only NaNs and -Infs. + + Limitations + ----------- + Input array is only supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.nanargmin` : Returns the indices of the minimum values along an axis, igonring NaNs. + :obj:`dpnp.argmax` : Returns the indices of the maximum values along an axis. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + array(0) + >>> np.nanargmax(a) + array(1) + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + + a, mask = _replace_nan(a, -dpnp.inf) + if mask is not None: + mask = dpnp.all(mask, axis=axis) + if dpnp.any(mask): + raise ValueError("All-NaN slice encountered") + return dpnp.argmax(a, axis=axis, out=out, keepdims=keepdims) + + +def nanargmin(a, axis=None, out=None, *, keepdims=False): + """ + Returns the indices of the minimum values along an axis ignoring NaNs. + + For full documentation refer to :obj:`numpy.nanargmin`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int, optional + Axis along which to search. If ``None``, the function must return + the index of the minimum value of the flattened array. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. 
+ keepdims : bool + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + If `axis` is ``None``, a zero-dimensional array containing the index of + the first occurrence of the minimum value ignoring NaNs; otherwise, a non-zero-dimensional + array containing the indices of the minimum values ignoring NaNs. The returned array + must have the default array index data type. + For all-NaN slices ``ValueError`` is raised. + Warning: the results cannot be trusted if a slice contains only NaNs and Infs. + + Limitations + ----------- + Input and output arrays are only supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.nanargmax` : Returns the indices of the maximum values along an axis, igonring NaNs. + :obj:`dpnp.argmin` : Returns the indices of the minimum values along an axis. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + array(0) + >>> np.nanargmin(a) + array(2) + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + + a, mask = _replace_nan(a, dpnp.inf) + if mask is not None: + mask = dpnp.all(mask, axis=axis) + if dpnp.any(mask): + raise ValueError("All-NaN slice encountered") + return dpnp.argmin(a, axis=axis, out=out, keepdims=keepdims) + + def nancumprod(x1, **kwargs): """ Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one. @@ -168,36 +310,194 @@ def nancumsum(x1, **kwargs): return call_origin(numpy.nancumsum, x1, **kwargs) -def nansum(x1, **kwargs): +def nanmax(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ - Calculate sum() function treating 'Not a Numbers' (NaN) as zero. + Return the maximum of an array or maximum along an axis, ignoring any NaNs. - For full documentation refer to :obj:`numpy.nansum`. + For full documentation refer to :obj:`numpy.nanmax`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int or tuple of ints, optional + Axis or axes along which maximum values must be computed. By default, + the maximum value must be computed over the entire array. If a tuple of integers, + maximum values must be computed over multiple axes. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + If the maximum value was computed over the entire array, a zero-dimensional array + containing the maximum value ignoring NaNs; otherwise, a non-zero-dimensional array + containing the maximum values ignoring NaNs. The returned array must have + the same data type as `a`. + When all-NaN slices are encountered a ``RuntimeWarning`` is raised and NaN is + returned for that slice. 
Limitations ----------- - Parameter `x1` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. + Input array is only supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Parameters `where`, and `initial` are only supported with their default values. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + See Also + -------- + :obj:`dpnp.nanmin` : The minimum value of an array along a given axis, ignoring any NaNs. + :obj:`dpnp.max` : The maximum value of an array along a given axis, propagating any NaNs. + :obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignoring any NaNs. + :obj:`dpnp.maximum` : Element-wise maximum of two arrays, propagating any NaNs. + :obj:`dpnp.isnan` : Shows which elements are Not a Number (NaN). + :obj:`dpnp.isfinite` : Shows which elements are neither NaN nor infinity. + Examples -------- >>> import dpnp as np - >>> np.nansum(np.array([1, 2])) - 3 - >>> np.nansum(np.array([[1, 2], [3, 4]])) - 10 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + array(3.) + >>> np.nanmax(a, axis=0) + array([3., 2.]) + >>> np.nanmax(a, axis=1) + array([2., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmax(np.array([1, 2, np.nan, np.NINF])) + array(2.) + >>> np.nanmax(np.array([1, 2, np.nan, np.inf])) + array(inf) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - result_obj = dpnp_nansum(x1_desc).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - return result + if initial is not None: + raise NotImplementedError( + "initial keyword argument is only supported with its default value." + ) + elif where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + else: + a, mask = _replace_nan(a, -dpnp.inf) + res = dpnp.max(a, axis=axis, out=out, keepdims=keepdims) + if mask is None: + return res + else: + mask = dpnp.all(mask, axis=axis) + if dpnp.any(mask): + dpnp.copyto(res, dpnp.nan, where=mask) + warnings.warn( + "All-NaN slice encountered", RuntimeWarning, stacklevel=2 + ) + return res - return call_origin(numpy.nansum, x1, **kwargs) + +def nanmin(a, axis=None, out=None, keepdims=False, initial=None, where=True): + """ + Return the minimum of an array or minimum along an axis, ignoring any NaNs. + + For full documentation refer to :obj:`numpy.nanmin`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int or tuple of ints, optional + Axis or axes along which minimum values must be computed. By default, + the minimum value must be computed over the entire array. If a tuple of integers, + minimum values must be computed over multiple axes. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. 
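As the Limitations section states, the NotImplementedError branch in nanmax above (nanmin mirrors it) fires as soon as `initial` or `where` departs from its default. A doctest-style illustration with arbitrary sample values:

>>> import dpnp as np
>>> ia = np.array([[1.0, 2.0], [3.0, np.nan]])
>>> np.nanmax(ia, initial=6)
Traceback (most recent call last):
    ...
NotImplementedError: initial keyword argument is only supported with its default value.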
+ + Returns + ------- + out : dpnp.ndarray + If the minimum value was computed over the entire array, a zero-dimensional array + containing the minimum value ignoring NaNs; otherwise, a non-zero-dimensional array + containing the minimum values ignoring NaNs. The returned array must have + the same data type as `a`. + When all-NaN slices are encountered a ``RuntimeWarning`` is raised and NaN is + returned for that slice. + + Limitations + ----------- + Input array is only supported as either :class:`dpnp.ndarray` + or :class:`dpctl.tensor.usm_ndarray`. + Parameters `where`, and `initial` are only supported with their default values. + Otherwise ``NotImplementedError`` exception will be raised. + Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.nanmax` : The maximum value of an array along a given axis, ignoring any NaNs. + :obj:`dpnp.min` : The minimum value of an array along a given axis, propagating any NaNs. + :obj:`dpnp.fmin` : Element-wise minimum of two arrays, ignoring any NaNs. + :obj:`dpnp.minimum` : Element-wise minimum of two arrays, propagating any NaNs. + :obj:`dpnp.isnan` : Shows which elements are Not a Number (NaN). + :obj:`dpnp.isfinite` : Shows which elements are neither NaN nor infinity. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + array(1.) + >>> np.nanmin(a, axis=0) + array([1., 2.]) + >>> np.nanmin(a, axis=1) + array([1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin(np.array([1, 2, np.nan, np.inf])) + array(1.) + >>> np.nanmin(np.array([1, 2, np.nan, np.NINF])) + array(-inf) + + """ + + if initial is not None: + raise NotImplementedError( + "initial keyword argument is only supported with its default value." + ) + elif where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + else: + a, mask = _replace_nan(a, +dpnp.inf) + res = dpnp.min(a, axis=axis, out=out, keepdims=keepdims) + if mask is None: + return res + else: + mask = dpnp.all(mask, axis=axis) + if dpnp.any(mask): + dpnp.copyto(res, dpnp.nan, where=mask) + warnings.warn( + "All-NaN slice encountered", RuntimeWarning, stacklevel=2 + ) + return res def nanprod( @@ -261,6 +561,38 @@ def nanprod( ) +def nansum(x1, **kwargs): + """ + Calculate sum() function treating 'Not a Numbers' (NaN) as zero. + + For full documentation refer to :obj:`numpy.nansum`. + + Limitations + ----------- + Parameter `x1` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. 
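When a whole slice is NaN, nanmax/nanmin above emit the RuntimeWarning and write NaN into that position of the result rather than raising. A short usage sketch, with values chosen purely for illustration:

import warnings
import dpnp as np

ia = np.array([[1.0, np.nan], [3.0, np.nan]])   # second column is all NaN
with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    res = np.nanmin(ia, axis=0)
print(res)              # array([ 1., nan])
print(w[0].category)    # <class 'RuntimeWarning'>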
+ + Examples + -------- + >>> import dpnp as np + >>> np.nansum(np.array([1, 2])) + 3 + >>> np.nansum(np.array([[1, 2], [3, 4]])) + 10 + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + result_obj = dpnp_nansum(x1_desc).get_pyobj() + result = dpnp.convert_single_elem_array_to_scalar(result_obj) + return result + + return call_origin(numpy.nansum, x1, **kwargs) + + def nanvar( a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True ): diff --git a/dpnp/dpnp_iface_searching.py b/dpnp/dpnp_iface_searching.py index 0210535e1697..5187a280d5ce 100644 --- a/dpnp/dpnp_iface_searching.py +++ b/dpnp/dpnp_iface_searching.py @@ -60,27 +60,31 @@ def argmax(a, axis=None, out=None, *, keepdims=False): a : {dpnp_array, usm_ndarray} Input array. axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. + Axis along which to search. If ``None``, the function must return + the index of the maximum value of the flattened array. + Default: ``None``. out : {dpnp_array, usm_ndarray}, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the array. + keepdims : bool + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. Returns ------- out : dpnp.ndarray - Indices of maximum value of `a`. It has the same shape as `a.shape` - with the dimension along `axis` removed. If `keepdims` is set to True, - then the size of `axis` will be 1 with the resulting array having same - shape as `a.shape`. + If `axis` is ``None``, a zero-dimensional array containing the index of + the first occurrence of the maximum value; otherwise, a non-zero-dimensional + array containing the indices of the minimum values. The returned array + must have the default array index data type. See Also -------- :obj:`dpnp.ndarray.argmax` : Equivalent function. + :obj:`dpnp.nanargmax` : Returns the indices of the maximum values along an axis, igonring NaNs. :obj:`dpnp.argmin` : Returns the indices of the minimum values along an axis. :obj:`dpnp.max` : The maximum value along a given axis. :obj:`dpnp.unravel_index` : Convert a flat index into an index tuple. @@ -140,27 +144,31 @@ def argmin(a, axis=None, out=None, *, keepdims=False): a : {dpnp_array, usm_ndarray} Input array. axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. + Axis along which to search. If ``None``, the function must return + the index of the minimum value of the flattened array. + Default: ``None``. out : {dpnp_array, usm_ndarray}, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the array. 
+ If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. Returns ------- out : dpnp.ndarray - Indices of minimum value of `a`. It has the same shape as `a.shape` - with the dimension along `axis` removed. If `keepdims` is set to True, - then the size of `axis` will be 1 with the resulting array having same - shape as `a.shape`. + If `axis` is ``None``, a zero-dimensional array containing the index of + the first occurrence of the minimum value; otherwise, a non-zero-dimensional + array containing the indices of the minimum values. The returned array + must have the default array index data type. See Also -------- :obj:`dpnp.ndarray.argmin` : Equivalent function. + :obj:`dpnp.nanargmin` : Returns the indices of the minimum values along an axis, igonring NaNs. :obj:`dpnp.argmax` : Returns the indices of the maximum values along an axis. :obj:`dpnp.min` : The minimum value along a given axis. :obj:`dpnp.unravel_index` : Convert a flat index into an index tuple. diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 5d00154659c2..4e4201c97cdb 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -374,14 +374,36 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): For full documentation refer to :obj:`numpy.max`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int or tuple of ints, optional + Axis or axes along which maximum values must be computed. By default, + the maximum value must be computed over the entire array. If a tuple of integers, + maximum values must be computed over multiple axes. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. + Returns ------- out : dpnp.ndarray - Maximum of `a`. + If the maximum value was computed over the entire array, a zero-dimensional array + containing the maximum value; otherwise, a non-zero-dimensional array + containing the maximum values. The returned array must have + the same data type as `a`. Limitations ----------- - Input and output arrays are only supported as either :class:`dpnp.ndarray` + Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. Parameters `where`, and `initial` are only supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. @@ -549,14 +571,36 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): For full documentation refer to :obj:`numpy.min`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + axis : int or tuple of ints, optional + Axis or axes along which minimum values must be computed. By default, + the minimum value must be computed over the entire array. If a tuple of integers, + minimum values must be computed over multiple axes. + Default: ``None``. 
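The reworded keepdims description for argmax/argmin (and for max/min in the statistics module below) comes down to whether the reduced axis survives as a singleton dimension. A doctest-style check on an arbitrarily shaped array:

>>> import dpnp as np
>>> a = np.arange(24).reshape(2, 3, 4)
>>> np.argmax(a, axis=1).shape
(2, 4)
>>> np.argmax(a, axis=1, keepdims=True).shape
(2, 1, 4)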
+ out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) must be included in the + result as singleton dimensions, and, accordingly, the result must be + compatible with the input array. Otherwise, if ``False``, the reduced + axes (dimensions) must not be included in the result. + Default: ``False``. + Returns ------- out : dpnp.ndarray - Minimum of `a`. + If the minimum value was computed over the entire array, a zero-dimensional array + containing the minimum value; otherwise, a non-zero-dimensional array + containing the minimum values. The returned array must have + the same data type as `a`. Limitations ----------- - Input and output arrays are only supported as either :class:`dpnp.ndarray` + Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. Parameters `where`, and `initial` are only supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. diff --git a/tests/conftest.py b/tests/conftest.py index 231af2e34fa9..6b9d01691a31 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -137,6 +137,13 @@ def suppress_mean_empty_slice_numpy_warnings(): yield +@pytest.fixture +def suppress_overflow_encountered_in_cast_numpy_warnings(): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"overflow encountered in cast") + yield + + @pytest.fixture def suppress_divide_invalid_numpy_warnings( suppress_divide_numpy_warnings, suppress_invalid_numpy_warnings diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index c3d1fd8b0779..15572947eee2 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -768,34 +768,6 @@ tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_1_{ar tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_2_{array=array([], dtype=float64)}::test_flatnonzero tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_3_{array=array([], shape=(0, 2), dtype=float64)}::test_flatnonzero tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_4_{array=array([], shape=(0, 2, 0), dtype=float64)}::test_flatnonzero -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_all -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis1 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis_large -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan3 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan4 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan5 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_tie -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_zero_size -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_zero_size_axis0 
-tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_zero_size_axis1 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_all -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis1 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis_large -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan3 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan4 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan5 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_tie -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size_axis1 tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_0_{array=array(0)}::test_nonzero tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_1_{array=array(1)}::test_nonzero tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_axis @@ -1038,29 +1010,43 @@ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis0 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis1 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis2 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis_large -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_all -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_all_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis0 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis1 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis2 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis_large -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q 
-tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_uxpected_interpolation + +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[higher] 
+tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_unxpected_method tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_ptp_all_nan tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_ptp_nan diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index 86805a4560b0..dae91d41ac3f 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -830,34 +830,6 @@ tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_1_{ar tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_2_{array=array([], dtype=float64)}::test_flatnonzero tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_3_{array=array([], shape=(0, 2), dtype=float64)}::test_flatnonzero tests/third_party/cupy/sorting_tests/test_search.py::TestFlatNonzero_param_4_{array=array([], shape=(0, 2, 0), dtype=float64)}::test_flatnonzero -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_all -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis1 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_axis_large -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan3 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan4 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_nan5 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_tie -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_zero_size -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_zero_size_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMax::test_nanargmax_zero_size_axis1 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_all -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis1 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis2 
-tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_axis_large -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan2 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan3 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan4 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_nan5 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_tie -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size_axis0 -tests/third_party/cupy/sorting_tests/test_search.py::TestNanArgMin::test_nanargmin_zero_size_axis1 tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_0_{array=array(0)}::test_nonzero tests/third_party/cupy/sorting_tests/test_search.py::TestNonzeroZeroDimension_param_1_{array=array(1)}::test_nonzero tests/third_party/cupy/sorting_tests/test_sort.py::TestArgpartition_param_0_{external=False}::test_argpartition_axis @@ -1100,29 +1072,43 @@ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis0 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis1 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis2 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis_large -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_all -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_all_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis0 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis1 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis2 -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_axis_large -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmin_nan -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out 
-tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_uxpected_interpolation + +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[midpoint] 
+tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[linear] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[lower] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[higher] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[midpoint] +tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_unxpected_method tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_ptp_all_nan tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_ptp_nan diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 3cfbd88b4dc1..af3e5fd79fef 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -700,8 +700,8 @@ def test_positive_boolean(): @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_prod_nanprod(func, axis, keepdims, dtype): a = numpy.arange(1, 13, dtype=dtype).reshape((2, 2, 3)) - if func == "nanprod" and issubclass(a.dtype.type, dpnp.inexact): - a[1:2:] = numpy.nan + if func == "nanprod" and dpnp.issubdtype(a.dtype, dpnp.inexact): + a[:, :, 2] = numpy.nan ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) @@ -718,24 +718,20 @@ def test_prod_zero_size(axis): np_res = numpy.prod(a, axis=axis) dpnp_res = dpnp.prod(ia, axis=axis) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) @pytest.mark.parametrize("func", ["prod", "nanprod"]) @pytest.mark.parametrize("axis", [None, 0, 1, -1]) @pytest.mark.parametrize("keepdims", [False, True]) def test_prod_nanprod_bool(func, axis, keepdims): - a = numpy.arange(2, dtype=dpnp.bool) + a = numpy.arange(2, dtype=numpy.bool_) a = numpy.tile(a, (2, 2)) ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) @pytest.mark.usefixtures("allow_fall_back_on_numpy") @@ -743,41 +739,48 @@ def test_prod_nanprod_bool(func, axis, keepdims): @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") @pytest.mark.parametrize("func", ["prod", "nanprod"]) @pytest.mark.parametrize("in_dtype", get_all_dtypes(no_bool=True)) -@pytest.mark.parametrize("out_dtype", get_all_dtypes(no_bool=True)) +@pytest.mark.parametrize( + "out_dtype", get_all_dtypes(no_bool=True, no_none=True) +) def test_prod_nanprod_dtype(func, in_dtype, out_dtype): a = numpy.arange(1, 13, dtype=in_dtype).reshape((2, 2, 3)) - if func == "nanprod" and issubclass(a.dtype.type, dpnp.inexact): - a[1:2:] = numpy.nan + if func == "nanprod" and dpnp.issubdtype(a.dtype, dpnp.inexact): + a[:, :, 2] = numpy.nan ia = dpnp.array(a) np_res = getattr(numpy, func)(a, dtype=out_dtype) dpnp_res = getattr(dpnp, func)(ia, dtype=out_dtype) - - if out_dtype is not None: - assert dpnp_res.dtype == out_dtype - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) +@pytest.mark.usefixtures("suppress_overflow_encountered_in_cast_numpy_warnings") @pytest.mark.parametrize("func", ["prod", "nanprod"]) def test_prod_nanprod_out(func): - a = numpy.arange(1, 7).reshape((2, 3)) - if func == "nanprod" and issubclass(a.dtype.type, dpnp.inexact): - a[1:2:] = numpy.nan - ia = dpnp.array(a) + ia = dpnp.arange(1, 7).reshape((2, 3)) + ia = 
ia.astype(dpnp.default_float_type(ia.device)) + if func == "nanprod": + ia[:, 1] = dpnp.nan + a = dpnp.asnumpy(ia) + # output is dpnp_array np_res = getattr(numpy, func)(a, axis=0) - dpnp_res = dpnp.array(numpy.empty_like(np_res)) - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) + assert dpnp_out is dpnp_res assert_allclose(dpnp_res, np_res) - dpnp_res = dpt.asarray(numpy.empty_like(np_res)) - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # output is usm_ndarray + dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) + assert dpt_out is dpnp_res.get_array() assert_allclose(dpnp_res, np_res) + # out is a numpy array -> TypeError dpnp_res = numpy.empty_like(np_res) with pytest.raises(TypeError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # incorrect shape for out dpnp_res = dpnp.array(numpy.empty((2, 3))) with pytest.raises(ValueError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) diff --git a/tests/test_search.py b/tests/test_search.py index aa5a2c9915c4..56f4f23739cb 100644 --- a/tests/test_search.py +++ b/tests/test_search.py @@ -5,58 +5,66 @@ import dpnp -from .helper import get_all_dtypes +from .helper import assert_dtype_allclose, get_all_dtypes -@pytest.mark.parametrize("func", ["argmax", "argmin"]) +@pytest.mark.parametrize("func", ["argmax", "argmin", "nanargmin", "nanargmax"]) @pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2]) @pytest.mark.parametrize("keepdims", [False, True]) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_argmax_argmin(func, axis, keepdims, dtype): a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) + if func in ["nanargmin", "nanargmax"] and dpnp.issubdtype( + a.dtype, dpnp.inexact + ): + a[2:3, 2, 3:4, 4] = numpy.nan ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) @pytest.mark.parametrize("func", ["argmax", "argmin"]) @pytest.mark.parametrize("axis", [None, 0, 1, -1]) @pytest.mark.parametrize("keepdims", [False, True]) def test_argmax_argmin_bool(func, axis, keepdims): - a = numpy.arange(2, dtype=dpnp.bool) + a = numpy.arange(2, dtype=numpy.bool_) a = numpy.tile(a, (2, 2)) ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["argmax", "argmin"]) +@pytest.mark.parametrize("func", ["argmax", "argmin", "nanargmin", "nanargmax"]) def test_argmax_argmin_out(func): - a = numpy.arange(6).reshape((2, 3)) + a = numpy.arange(12, dtype=numpy.float32).reshape((2, 2, 3)) + if func in ["nanargmin", "nanargmax"]: + a[1, 0, 2] = numpy.nan ia = dpnp.array(a) + # out is dpnp_array np_res = getattr(numpy, func)(a, axis=0) - dpnp_res = dpnp.array(numpy.empty_like(np_res)) - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) + assert dpnp_out is dpnp_res assert_allclose(dpnp_res, np_res) - dpnp_res = dpt.asarray(numpy.empty_like(np_res)) - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # out is 
usm_ndarray + dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) + assert dpt_out is dpnp_res.get_array() assert_allclose(dpnp_res, np_res) + # out is a numpy array -> TypeError dpnp_res = numpy.empty_like(np_res) with pytest.raises(TypeError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) - dpnp_res = dpnp.array(numpy.empty((2, 3))) + # out shape is incorrect -> ValueError + dpnp_res = dpnp.array(numpy.empty((2, 2))) with pytest.raises(ValueError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) @@ -64,13 +72,23 @@ def test_argmax_argmin_out(func): @pytest.mark.parametrize("axis", [None, 0, 1, -1]) @pytest.mark.parametrize("keepdims", [False, True]) def test_ndarray_argmax_argmin(axis, keepdims): - a = numpy.arange(192, dtype="f4").reshape((4, 6, 8)) + a = numpy.arange(192, dtype=numpy.float32).reshape((4, 6, 8)) ia = dpnp.array(a) np_res = a.argmax(axis=axis, keepdims=keepdims) dpnp_res = ia.argmax(axis=axis, keepdims=keepdims) - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) np_res = a.argmin(axis=axis, keepdims=keepdims) dpnp_res = ia.argmin(axis=axis, keepdims=keepdims) - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["nanargmin", "nanargmax"]) +def test_nanargmax_nanargmin_error(func): + ia = dpnp.arange(12, dtype=dpnp.float32).reshape((2, 2, 3)) + ia[:, :, 2] = dpnp.nan + + # All-NaN slice encountered -> ValueError + with pytest.raises(ValueError): + getattr(dpnp, func)(ia, axis=0) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index 3caaaf9c805f..f3866b3c27e1 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -1,3 +1,5 @@ +import warnings + import dpctl.tensor as dpt import numpy import pytest @@ -30,50 +32,52 @@ def test_median(dtype, size): assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["max", "min"]) +@pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) @pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) @pytest.mark.parametrize("keepdims", [False, True]) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_max_min(func, axis, keepdims, dtype): a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) + if func in ["nanmax", "nanmin"] and dpnp.issubdtype(a.dtype, dpnp.inexact): + a[2:3, 2, 3:4, 4] = numpy.nan ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) @pytest.mark.parametrize("func", ["max", "min"]) @pytest.mark.parametrize("axis", [None, 0, 1, -1]) @pytest.mark.parametrize("keepdims", [False, True]) def test_max_min_bool(func, axis, keepdims): - a = numpy.arange(2, dtype=dpnp.bool) + a = numpy.arange(2, dtype=numpy.bool_) a = numpy.tile(a, (2, 2)) ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) + assert_dtype_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["max", "min"]) +@pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) def test_max_min_out(func): - a = numpy.arange(6).reshape((2, 3)) + a = numpy.arange(12, dtype=numpy.float32).reshape((2, 2, 3)) + if func in ["nanmax", "nanmin"]: 
+ a[1, 0, 2] = numpy.nan ia = dpnp.array(a) + # out is dpnp_array np_res = getattr(numpy, func)(a, axis=0) - # output is dpnp array - dpnp_res = dpnp.array(numpy.empty_like(np_res)) - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) + assert dpnp_out is dpnp_res assert_allclose(dpnp_res, np_res) - # output is usm array - dpnp_res = dpt.asarray(numpy.empty_like(np_res)) - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # out is usm_ndarray + dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) + assert dpt_out is dpnp_res.get_array() assert_allclose(dpnp_res, np_res) # output is numpy array -> Error @@ -82,12 +86,12 @@ def test_max_min_out(func): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) # output has incorrect shape -> Error - dpnp_res = dpnp.array(numpy.empty((2, 3))) + dpnp_res = dpnp.array(numpy.empty((4, 2))) with pytest.raises(ValueError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) -@pytest.mark.parametrize("func", ["max", "min"]) +@pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) def test_max_min_error(func): ia = dpnp.arange(5) # where is not supported @@ -99,6 +103,32 @@ def test_max_min_error(func): getattr(dpnp, func)(ia, initial=6) +@pytest.mark.parametrize("func", ["nanmax", "nanmin"]) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) +def test_nanmax_nanmin_no_NaN(func, dtype): + a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = getattr(dpnp, func)(ia, axis=0) + assert_dtype_allclose(dpnp_res, np_res) + + +@pytest.mark.parametrize("func", ["nanmax", "nanmin"]) +def test_nanmax_nanmin_all_NaN(recwarn, func): + a = numpy.arange(12, dtype=numpy.float32).reshape((2, 2, 3)) + a[:, :, 2] = numpy.nan + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = getattr(dpnp, func)(ia, axis=0) + assert_dtype_allclose(dpnp_res, np_res) + + assert len(recwarn) == 2 + assert all("All-NaN slice encountered" in str(r.message) for r in recwarn) + assert all(r.category is RuntimeWarning for r in recwarn) + + class TestMean: @pytest.mark.parametrize("dtype", get_all_dtypes()) def test_mean_axis_tuple(self, dtype): diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 26d047c103d4..31196e2b3dae 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -369,8 +369,12 @@ def test_meshgrid(device_x, device_y): pytest.param("max", [1.0, 2.0, 4.0, 7.0]), pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), + pytest.param("nanargmax", [1.0, 2.0, 4.0, dpnp.nan]), + pytest.param("nanargmin", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("nancumprod", [1.0, dpnp.nan]), pytest.param("nancumsum", [1.0, dpnp.nan]), + pytest.param("nanmax", [1.0, 2.0, 4.0, dpnp.nan]), + pytest.param("nanmin", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("nanprod", [1.0, dpnp.nan]), pytest.param("nansum", [1.0, dpnp.nan]), pytest.param("nanvar", [1.0, 2.0, 4.0, dpnp.nan]), diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index b889bab6010c..c69671e5bb0b 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -399,6 +399,10 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("max", [1.0, 2.0, 4.0, 7.0]), pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), + pytest.param("nanargmax", [1.0, 2.0, 
4.0, dp.nan]), + pytest.param("nanargmin", [1.0, 2.0, 4.0, dp.nan]), + pytest.param("nanmax", [1.0, 2.0, 4.0, dp.nan]), + pytest.param("nanmin", [1.0, 2.0, 4.0, dp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), diff --git a/tests/third_party/cupy/sorting_tests/test_search.py b/tests/third_party/cupy/sorting_tests/test_search.py index edfe4ea02ed1..d14503d6c17b 100644 --- a/tests/third_party/cupy/sorting_tests/test_search.py +++ b/tests/third_party/cupy/sorting_tests/test_search.py @@ -366,40 +366,39 @@ def test_argwhere(self, xp, dtype): # return cupy.nonzero(self.array) -@testing.gpu -class TestNanArgMin(unittest.TestCase): +class TestNanArgMin: @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_nanargmin_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return xp.nanargmin(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmin_nan(self, xp, dtype): a = xp.array([float("nan"), -1, 1], dtype) return xp.nanargmin(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmin_nan2(self, xp, dtype): a = xp.array([float("nan"), float("nan"), -1, 1], dtype) return xp.nanargmin(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmin_nan3(self, xp, dtype): a = xp.array([float("nan"), float("nan"), -1, 1, 1.0, -2.0], dtype) return xp.nanargmin(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmin_nan4(self, xp, dtype): a = xp.array([-1, 1, 1.0, -2.0, float("nan"), float("nan")], dtype) return xp.nanargmin(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmin_nan5(self, xp, dtype): a = xp.array( [-1, 1, 1.0, -2.0, float("nan"), float("nan"), -1, 1], dtype @@ -457,40 +456,39 @@ def test_nanargmin_zero_size_axis1(self, xp, dtype): return xp.nanargmin(a, axis=1) -@testing.gpu -class TestNanArgMax(unittest.TestCase): +class TestNanArgMax: @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_nanargmax_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return xp.nanargmax(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmax_nan(self, xp, dtype): a = xp.array([float("nan"), -1, 1], dtype) return xp.nanargmax(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmax_nan2(self, xp, dtype): a = xp.array([float("nan"), float("nan"), -1, 1], dtype) return xp.nanargmax(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmax_nan3(self, xp, dtype): a = xp.array([float("nan"), float("nan"), -1, 1, 1.0, -2.0], dtype) return xp.nanargmax(a) - 
@testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmax_nan4(self, xp, dtype): a = xp.array([-1, 1, 1.0, -2.0, float("nan"), float("nan")], dtype) return xp.nanargmax(a) - @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(accept_error=ValueError) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose() def test_nanargmax_nan5(self, xp, dtype): a = xp.array( [-1, 1, 1.0, -2.0, float("nan"), float("nan"), -1, 1], dtype diff --git a/tests/third_party/cupy/statistics_tests/test_order.py b/tests/third_party/cupy/statistics_tests/test_order.py index 2ca82b473b52..62ac2f72b361 100644 --- a/tests/third_party/cupy/statistics_tests/test_order.py +++ b/tests/third_party/cupy/statistics_tests/test_order.py @@ -7,106 +7,110 @@ import dpnp as cupy from tests.third_party.cupy import testing -_all_interpolations = ( +_all_methods = ( + # "inverted_cdf", # TODO(takagi) Not implemented + # "averaged_inverted_cdf", # TODO(takagi) Not implemented + # "closest_observation", # TODO(takagi) Not implemented + # "interpolated_inverted_cdf", # TODO(takagi) Not implemented + # "hazen", # TODO(takagi) Not implemented + # "weibull", # TODO(takagi) Not implemented + "linear", + # "median_unbiased", # TODO(takagi) Not implemented + # "normal_unbiased", # TODO(takagi) Not implemented "lower", "higher", "midpoint", - # 'nearest', # TODO(hvy): Not implemented - "linear", + # "nearest", # TODO(hvy): Not implemented ) -def for_all_interpolations(name="interpolation"): - return testing.for_orders(_all_interpolations, name=name) +def for_all_methods(name="method"): + return pytest.mark.parametrize(name, _all_methods) -@testing.gpu -class TestOrder(unittest.TestCase): - @for_all_interpolations() +@testing.with_requires("numpy>=1.22.0rc1") +class TestOrder: + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose() - def test_percentile_defaults(self, xp, dtype, interpolation): + def test_percentile_defaults(self, xp, dtype, method): a = testing.shaped_random((2, 3, 8), xp, dtype) q = testing.shaped_random((3,), xp, dtype=dtype, scale=100) - return xp.percentile(a, q, interpolation=interpolation) + return xp.percentile(a, q, method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose() - def test_percentile_q_list(self, xp, dtype, interpolation): + def test_percentile_q_list(self, xp, dtype, method): a = testing.shaped_arange((1001,), xp, dtype) q = [99, 99.9] - return xp.percentile(a, q, interpolation=interpolation) + return xp.percentile(a, q, method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) - def test_percentile_no_axis(self, xp, dtype, interpolation): + def test_percentile_no_axis(self, xp, dtype, method): a = testing.shaped_random((10, 2, 4, 8), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) - return xp.percentile(a, q, axis=None, interpolation=interpolation) + return xp.percentile(a, q, axis=None, method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) - def test_percentile_neg_axis(self, xp, dtype, interpolation): + def 
test_percentile_neg_axis(self, xp, dtype, method): a = testing.shaped_random((4, 3, 10, 2, 8), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) - return xp.percentile(a, q, axis=-1, interpolation=interpolation) + return xp.percentile(a, q, axis=-1, method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) - def test_percentile_tuple_axis(self, xp, dtype, interpolation): + def test_percentile_tuple_axis(self, xp, dtype, method): a = testing.shaped_random((1, 6, 3, 2), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) - return xp.percentile(a, q, axis=(0, 1, 2), interpolation=interpolation) + return xp.percentile(a, q, axis=(0, 1, 2), method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose() - def test_percentile_scalar_q(self, xp, dtype, interpolation): + def test_percentile_scalar_q(self, xp, dtype, method): a = testing.shaped_random((2, 3, 8), xp, dtype) q = 13.37 - return xp.percentile(a, q, interpolation=interpolation) + return xp.percentile(a, q, method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-5) - def test_percentile_keepdims(self, xp, dtype, interpolation): + def test_percentile_keepdims(self, xp, dtype, method): a = testing.shaped_random((7, 2, 9, 2), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) - return xp.percentile( - a, q, axis=None, keepdims=True, interpolation=interpolation - ) + return xp.percentile(a, q, axis=None, keepdims=True, method=method) - @for_all_interpolations() + @for_all_methods() @testing.for_float_dtypes(no_float16=True) # NumPy raises error on int8 @testing.numpy_cupy_allclose(rtol=1e-6) - def test_percentile_out(self, xp, dtype, interpolation): + def test_percentile_out(self, xp, dtype, method): a = testing.shaped_random((10, 2, 3, 2), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) out = testing.shaped_random((5, 10, 2, 3), xp, dtype) - return xp.percentile( - a, q, axis=-1, interpolation=interpolation, out=out - ) + return xp.percentile(a, q, axis=-1, method=method, out=out) - @for_all_interpolations() + @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) - def test_percentile_bad_q(self, dtype, interpolation): + def test_percentile_bad_q(self, dtype, method): for xp in (numpy, cupy): a = testing.shaped_random((4, 2, 3, 2), xp, dtype) q = testing.shaped_random((1, 2, 3), xp, dtype=dtype, scale=100) with pytest.raises(ValueError): - xp.percentile(a, q, axis=-1, interpolation=interpolation) + xp.percentile(a, q, axis=-1, method=method) @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) - def test_percentile_uxpected_interpolation(self, dtype): + def test_percentile_unxpected_method(self, dtype): for xp in (numpy, cupy): a = testing.shaped_random((4, 2, 3, 2), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) with pytest.raises(ValueError): - xp.percentile(a, q, axis=-1, interpolation="deadbeef") + xp.percentile(a, q, axis=-1, method="deadbeef") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() @@ -152,8 +156,8 @@ def test_nanmax_all_nan(self, xp, dtype): with warnings.catch_warnings(record=True) as w: 
warnings.simplefilter("always") m = xp.nanmax(a) - self.assertEqual(len(w), 1) - self.assertIs(w[0].category, RuntimeWarning) + assert len(w) == 1 + assert w[0].category is RuntimeWarning return m @testing.for_all_dtypes(no_complex=True) @@ -200,8 +204,8 @@ def test_nanmin_all_nan(self, xp, dtype): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") m = xp.nanmin(a) - self.assertEqual(len(w), 1) - self.assertIs(w[0].category, RuntimeWarning) + assert len(w) == 1 + assert w[0].category is RuntimeWarning return m @testing.for_all_dtypes(no_bool=True) From 9e8323e8169be4a8fa52a5a5f4558c5107b0ae06 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Fri, 22 Dec 2023 17:33:50 +0100 Subject: [PATCH 36/38] Added missing `dpnp.column_stack`, `dpnp.dstack` and `dpnp.row_stack` (#1647) * Implemented column_stack, dstack and row_stack * Updated docstring of all manipulation functions * Added tests * Muted test causing crash on CPU due to known issue * Disable type check in a test for column_stack * Corrected result data type from dpnp.prod on Gen9 * Update dpnp/dpnp_iface_manipulation.py Co-authored-by: vtavana <120411540+vtavana@users.noreply.github.com> * Update dpnp/dpnp_iface_manipulation.py Co-authored-by: vtavana <120411540+vtavana@users.noreply.github.com> * Updated docstrings to address review comments --------- Co-authored-by: vtavana <120411540+vtavana@users.noreply.github.com> --- dpnp/dpnp_iface_manipulation.py | 562 ++++++++++++----- dpnp/dpnp_iface_mathematical.py | 6 +- tests/test_arraymanipulation.py | 593 ++++++++++-------- tests/test_sycl_queue.py | 32 + tests/test_usm_type.py | 24 + .../cupy/manipulation_tests/test_join.py | 32 +- 6 files changed, 796 insertions(+), 453 deletions(-) diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 0913ddb886d3..9efb2aa04f17 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -55,8 +55,10 @@ "broadcast_arrays", "broadcast_to", "can_cast", + "column_stack", "concatenate", "copyto", + "dstack", "expand_dims", "flip", "fliplr", @@ -69,6 +71,7 @@ "result_type", "roll", "rollaxis", + "row_stack", "shape", "squeeze", "stack", @@ -80,22 +83,52 @@ ] +def _check_stack_arrays(arrays): + """Validate a sequence type of arrays to stack.""" + + if not hasattr(arrays, "__getitem__"): + raise TypeError( + 'arrays to stack must be passed as a "sequence" type ' + "such as list or tuple." + ) + + def asfarray(a, dtype=None, *, device=None, usm_type=None, sycl_queue=None): """ Return an array converted to a float type. For full documentation refer to :obj:`numpy.asfarray`. - Notes - ----- - If `dtype` is ``None``, :obj:`dpnp.bool` or one of the `int` dtypes, - it is replaced with the default floating type (:obj:`dpnp.float64` - if a device supports it, or :obj:`dpnp.float32` type otherwise). + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. + This includes an instance of :class:`dpnp.ndarray` or + :class:`dpctl.tensor.usm_ndarray`, an object representing + SYCL USM allocation and implementing `__sycl_usm_array_interface__` + protocol, an instance of :class:`numpy.ndarray`, an object supporting + Python buffer protocol, a Python scalar, or a (possibly nested) + sequence of Python scalars. + dtype : str or dtype object, optional + Float type code to coerce input array `a`. 
If `dtype` is ``None``, + :obj:`dpnp.bool` or one of the `int` dtypes, it is replaced with + the default floating type (:obj:`dpnp.float64` if a device supports it, + or :obj:`dpnp.float32` type otherwise). + device : {None, string, SyclDevice, SyclQueue}, optional + An array API concept of device where the output array is created. + The `device` can be ``None`` (the default), an OneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of :class:`dpctl.SyclQueue`, + or a `Device` object returned by :obj:`dpnp.ndarray.device` property. + usm_type : {None, "device", "shared", "host"}, optional + The type of SYCL USM allocation for the output array. + sycl_queue : {None, SyclQueue}, optional + A SYCL queue to use for output array allocation and copying. Returns ------- - y : dpnp.ndarray - The input a as a float ndarray. + out : dpnp.ndarray + The input `a` as a float ndarray. Examples -------- @@ -113,7 +146,7 @@ def asfarray(a, dtype=None, *, device=None, usm_type=None, sycl_queue=None): a, sycl_queue=sycl_queue, device=device ) - if dtype is None or not numpy.issubdtype(dtype, dpnp.inexact): + if dtype is None or not dpnp.issubdtype(dtype, dpnp.inexact): dtype = dpnp.default_float_type(sycl_queue=_sycl_queue) return dpnp.asarray( @@ -315,16 +348,18 @@ def broadcast_arrays(*args, subok=False): For full documentation refer to :obj:`numpy.broadcast_arrays`. + Parameters + ---------- + args : {dpnp.ndarray, usm_ndarray} + A list of arrays to broadcast. + Returns ------- - broadcasted : list of dpnp.ndarray - These arrays are views on the original arrays. + out : list of dpnp.ndarray + A list of arrays which are views on the original arrays from `args`. Limitations ----------- - Parameter `args` is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Otherwise ``TypeError`` exception will be raised. Parameter `subok` is supported with default value. Otherwise ``NotImplementedError`` exception will be raised. @@ -350,12 +385,8 @@ def broadcast_arrays(*args, subok=False): if len(args) == 0: return [] - dpt_arrays = dpt.broadcast_arrays( - *[dpnp.get_usm_ndarray(array) for array in args] - ) - return [ - dpnp_array._create_from_usm_ndarray(usm_arr) for usm_arr in dpt_arrays - ] + usm_arrays = dpt.broadcast_arrays(*[dpnp.get_usm_ndarray(a) for a in args]) + return [dpnp_array._create_from_usm_ndarray(a) for a in usm_arrays] def broadcast_to(array, /, shape, subok=False): @@ -364,19 +395,23 @@ def broadcast_to(array, /, shape, subok=False): For full documentation refer to :obj:`numpy.broadcast_to`. + Parameters + ---------- + array : {dpnp.ndarray, usm_ndarray} + The array to broadcast. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. + Returns ------- - y : dpnp.ndarray + out : dpnp.ndarray An array having a specified shape. Must have the same data type as `array`. Limitations ----------- - Parameter `array` is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Otherwise ``TypeError`` exception will be raised. Parameter `subok` is supported with default value. Otherwise ``NotImplementedError`` exception will be raised. - Input array data types of `array` is limited by supported DPNP :ref:`Data types`. 
     See Also
     --------
@@ -384,9 +419,9 @@ def broadcast_to(array, /, shape, subok=False):
 
     Examples
     --------
-    >>> import dpnp as dp
-    >>> x = dp.array([1, 2, 3])
-    >>> dp.broadcast_to(x, (3, 3))
+    >>> import dpnp as np
+    >>> x = np.array([1, 2, 3])
+    >>> np.broadcast_to(x, (3, 3))
     array([[1, 2, 3],
            [1, 2, 3],
            [1, 2, 3]])
@@ -396,8 +431,8 @@ def broadcast_to(array, /, shape, subok=False):
     if subok is not False:
         raise NotImplementedError(f"subok={subok} is currently not supported")
 
-    dpt_array = dpnp.get_usm_ndarray(array)
-    new_array = dpt.broadcast_to(dpt_array, shape)
+    usm_array = dpnp.get_usm_ndarray(array)
+    new_array = dpt.broadcast_to(usm_array, shape)
     return dpnp_array._create_from_usm_ndarray(new_array)
 
 
@@ -405,14 +440,11 @@ def can_cast(from_, to, casting="safe"):
     """
    Returns ``True`` if cast between data types can occur according to the casting rule.
 
-    If `from` is a scalar or array scalar, also returns ``True`` if the scalar value can
-    be cast without overflow or truncation to an integer.
-
    For full documentation refer to :obj:`numpy.can_cast`.
 
    Parameters
    ----------
-    from : dpnp.array, dtype
+    from_ : {dpnp.ndarray, usm_ndarray, dtype, dtype specifier}
        Source data type.
    to : dtype
        Target data type.
@@ -422,12 +454,62 @@ def can_cast(from_, to, casting="safe"):
    Returns
    -------
    out: bool
-        True if cast can occur according to the casting rule.
+        ``True`` if cast can occur according to the casting rule,
+        ``False`` otherwise.

    See Also
    --------
-    :obj:`dpnp.result_type` : Returns the type that results from applying the NumPy
-        type promotion rules to the arguments.
+    :obj:`dpnp.result_type` : Returns the type that results from applying
+        the NumPy type promotion rules to the arguments.
+
+    Examples
+    --------
+    Basic examples
+
+    >>> import dpnp as np
+    >>> np.can_cast(np.int32, np.int64)
+    True
+    >>> np.can_cast(np.float64, complex)
+    True
+    >>> np.can_cast(complex, float)
+    False
+
+    >>> np.can_cast('i8', 'f8')
+    True
+    >>> np.can_cast('i8', 'f4')
+    False
+
+    Array scalar checks the value, array does not
+
+    >>> np.can_cast(np.array(1000.0), np.float32)
+    True
+    >>> np.can_cast(np.array([1000.0]), np.float32)
+    False
+
+    Using the casting rules
+
+    >>> np.can_cast('i8', 'i8', 'no')
+    True
+    >>> np.can_cast('<i8', '>i8', 'no')
+    False
+
+    >>> np.can_cast('<i8', '>i8', 'equiv')
+    True
+    >>> np.can_cast('<i4', '>i8', 'equiv')
+    False
+
+    >>> np.can_cast('<i4', '>i8', 'safe')
+    True
+    >>> np.can_cast('<i8', '>i4', 'safe')
+    False
+
+    >>> np.can_cast('<i8', '>i4', 'same_kind')
+    True
+    >>> np.can_cast('<i8', '>u4', 'same_kind')
+    False
+
+    >>> np.can_cast('<i8', '>u4', 'unsafe')
+    True

    """

@@ -442,6 +524,64 @@ def can_cast(from_, to, casting="safe"):
    return dpt.can_cast(dtype_from, to, casting)


+def column_stack(tup):
+    """
+    Stacks 1-D and 2-D arrays as columns into a 2-D array.
+
+    Take a sequence of 1-D arrays and stack them as columns to make a single
+    2-D array. 2-D arrays are stacked as-is, just like with :obj:`dpnp.hstack`.
+    1-D arrays are turned into 2-D columns first.
+
+    For full documentation refer to :obj:`numpy.column_stack`.
+
+    Parameters
+    ----------
+    tup : {dpnp.ndarray, usm_ndarray}
+        A sequence of 1-D or 2-D arrays to stack. All of them must have
+        the same first dimension.
+
+    Returns
+    -------
+    out : dpnp.ndarray
+        The array formed by stacking the given arrays.
+
+    See Also
+    --------
+    :obj:`dpnp.stack` : Join a sequence of arrays along a new axis.
+    :obj:`dpnp.dstack` : Stack arrays in sequence depth wise (along third axis).
+    :obj:`dpnp.hstack` : Stack arrays in sequence horizontally (column wise).
+ :obj:`dpnp.vstack` : Stack arrays in sequence vertically (row wise). + :obj:`dpnp.concatenate` : Join a sequence of arrays along an existing axis. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array((1, 2, 3)) + >>> b = np.array((2, 3, 4)) + >>> np.column_stack((a, b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + + _check_stack_arrays(tup) + + arrays = [] + for v in tup: + dpnp.check_supported_arrays_type(v) + + if v.ndim == 1: + v = v[:, dpnp.newaxis] + elif v.ndim != 2: + raise ValueError( + "Only 1 or 2 dimensional arrays can be column stacked" + ) + + arrays.append(v) + return dpnp.concatenate(arrays, axis=1) + + def concatenate( arrays, /, *, axis=0, out=None, dtype=None, casting="same_kind" ): @@ -529,14 +669,18 @@ def copyto(dst, src, casting="same_kind", where=True): For full documentation refer to :obj:`numpy.copyto`. - Limitations - ----------- - The `dst` parameter is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - The `where` parameter is supported as either :class:`dpnp.ndarray`, - :class:`dpctl.tensor.usm_ndarray` or scalar. - Otherwise ``TypeError`` exception will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters + ---------- + dst : {dpnp.ndarray, usm_ndarray} + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + where : {dpnp.ndarray, usm_ndarray, scalar} of bool, optional + A boolean array or a scalar which is broadcasted to match + the dimensions of `dst`, and selects elements to copy + from `src` to `dst` wherever it contains the value ``True``. Examples -------- @@ -599,6 +743,65 @@ def copyto(dst, src, casting="same_kind", where=True): dst_usm[mask_usm] = src_usm[mask_usm] +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M, N)` have been reshaped to `(M, N, 1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1, N, 1)`. Rebuilds arrays divided by + :obj:`dpnp.dsplit`. + + For full documentation refer to :obj:`numpy.dstack`. + + Parameters + ---------- + tup : {dpnp.ndarray, usm_ndarray} + One or more array-like sequences. The arrays must have the same shape + along all but the third axis. 1-D or 2-D arrays must have the same shape. + + Returns + ------- + out : dpnp.ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + :obj:`dpnp.concatenate` : Join a sequence of arrays along an existing axis. + :obj:`dpnp.vstack` : Stack arrays in sequence vertically (row wise). + :obj:`dpnp.hstack` : Stack arrays in sequence horizontally (column wise). + :obj:`dpnp.column_stack` : Stack 1-D arrays as columns into a 2-D array. + :obj:`dpnp.stack` : Join a sequence of arrays along a new axis. + :obj:`dpnp.block` : Assemble an nd-array from nested lists of blocks. + :obj:`dpnp.dsplit` : Split array along third axis. 
+ + Examples + -------- + >>> import dpnp as np + >>> a = np.array((1, 2, 3)) + >>> b = np.array((2, 3, 4)) + >>> np.dstack((a, b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[2], [3], [4]]) + >>> np.dstack((a, b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + + _check_stack_arrays(tup) + + arrs = atleast_3d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + return dpnp.concatenate(arrs, axis=2) + + def expand_dims(a, axis): """ Expand the shape of an array. @@ -608,19 +811,19 @@ def expand_dims(a, axis): For full documentation refer to :obj:`numpy.expand_dims`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + Returns ------- out : dpnp.ndarray An array with the number of dimensions increased. A view is returned whenever possible. - Limitations - ----------- - Parameters `a` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. - Otherwise ``TypeError`` exception will be raised. - Notes ----- If `a` has rank (i.e, number of dimensions) `N`, a valid `axis` must reside @@ -681,9 +884,9 @@ def expand_dims(a, axis): """ - dpt_array = dpnp.get_usm_ndarray(a) + usm_array = dpnp.get_usm_ndarray(a) return dpnp_array._create_from_usm_ndarray( - dpt.expand_dims(dpt_array, axis=axis) + dpt.expand_dims(usm_array, axis=axis) ) @@ -695,18 +898,22 @@ def flip(m, axis=None): For full documentation refer to :obj:`numpy.flip`. + Parameters + ---------- + m : {dpnp.ndarray, usm_ndarray} + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + ``axis=None``, will flip over all of the axes of the input array. + If `axis` is negative it counts from the last to the first axis. + If `axis` is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + Returns ------- out : dpnp.ndarray A view of `m` with the entries of axis reversed. - Limitations - ----------- - Parameters `m` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. - Otherwise ``TypeError`` exception will be raised. - See Also -------- :obj:`dpnp.flipud` : Flip an array vertically (axis=0). @@ -761,18 +968,16 @@ def fliplr(m): For full documentation refer to :obj:`numpy.fliplr`. + Parameters + ---------- + m : {dpnp.ndarray, usm_ndarray} + Input array, must be at least 2-D. + Returns ------- out : dpnp.ndarray A view of `m` with the columns reversed. - Limitations - ----------- - Parameters `m` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. - Otherwise ``TypeError`` exception will be raised. - See Also -------- :obj:`dpnp.flipud` : Flip an array vertically (axis=0). @@ -813,18 +1018,16 @@ def flipud(m): For full documentation refer to :obj:`numpy.flipud`. + Parameters + ---------- + m : {dpnp.ndarray, usm_ndarray} + Input array. + Returns ------- out : dpnp.ndarray A view of `m` with the rows reversed. - Limitations - ----------- - Parameters `m` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. 
- Otherwise ``TypeError`` exception will be raised. - See Also -------- :obj:`dpnp.fliplr` : Flip array in the left/right direction. @@ -885,33 +1088,34 @@ def hstack(tup, *, dtype=None, casting="same_kind"): :obj:`dpnp.concatenate` : Join a sequence of arrays along an existing axis. :obj:`dpnp.stack` : Join a sequence of arrays along a new axis. :obj:`dpnp.vstack` : Stack arrays in sequence vertically (row wise). + :obj:`dpnp.dstack` : Stack arrays in sequence depth wise (along third dimension). + :obj:`dpnp.column_stack` : Stack 1-D arrays as columns into a 2-D array. :obj:`dpnp.block` : Assemble an nd-array from nested lists of blocks. :obj:`dpnp.split` : Split array into a list of multiple sub-arrays of equal size. Examples -------- >>> import dpnp as np - >>> a = np.array((1,2,3)) - >>> b = np.array((4,5,6)) - >>> np.hstack((a,b)) + >>> a = np.array((1, 2, 3)) + >>> b = np.array((4, 5, 6)) + >>> np.hstack((a, b)) array([1, 2, 3, 4, 5, 6]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[4],[5],[6]]) - >>> np.hstack((a,b)) + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[4], [5], [6]]) + >>> np.hstack((a, b)) array([[1, 4], [2, 5], [3, 6]]) """ - if not hasattr(tup, "__getitem__"): - raise TypeError( - "Arrays to stack must be passed as a sequence type such as list or tuple." - ) + _check_stack_arrays(tup) + arrs = dpnp.atleast_1d(*tup) if not isinstance(arrs, list): arrs = [arrs] + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" if arrs and arrs[0].ndim == 1: return dpnp.concatenate(arrs, axis=0, dtype=dtype, casting=casting) @@ -925,20 +1129,20 @@ def moveaxis(a, source, destination): For full documentation refer to :obj:`numpy.moveaxis`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. + destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + Returns ------- out : dpnp.ndarray - Array with moved axes. - The returned array will have the same data and - the same USM allocation type as `a`. - - Limitations - ----------- - Parameters `a` is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. Otherwise ``TypeError`` exception - will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. - Otherwise ``TypeError`` exception will be raised. + Array with moved axes. This array is a view of the input array. See Also -------- @@ -956,9 +1160,9 @@ def moveaxis(a, source, destination): """ - dpt_array = dpnp.get_usm_ndarray(a) + usm_array = dpnp.get_usm_ndarray(a) return dpnp_array._create_from_usm_ndarray( - dpt.moveaxis(dpt_array, source, destination) + dpt.moveaxis(usm_array, source, destination) ) @@ -984,7 +1188,7 @@ def ravel(a, order="C"): Returns ------- out : dpnp_array - `out` is a contiguous 1-D array of the same subtype as `a`, with shape (a.size,) + A contiguous 1-D array of the same subtype as `a`, with shape (a.size,). See Also -------- @@ -1082,7 +1286,7 @@ def reshape(a, /, newshape, order="C", copy=None): inferred from the length of the array and remaining dimensions. order : {'C', 'F'}, optional Read the elements of `a` using this index order, and place the - elements into the reshaped array using this index order. 'C' + elements into the reshaped array using this index order. 
'C' means to read / write the elements using C-like index order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to read / write the @@ -1100,7 +1304,7 @@ def reshape(a, /, newshape, order="C", copy=None): Returns ------- - y : dpnp.ndarray + out : dpnp.ndarray This will be a new view object if possible; otherwise, it will be a copy. Note there is no guarantee of the *memory layout* (C- or Fortran- contiguous) of the returned array. @@ -1153,7 +1357,7 @@ def result_type(*arrays_and_dtypes): Parameters ---------- - arrays_and_dtypes : list of arrays and dtypes + arrays_and_dtypes : list of {dpnp.ndarray, usm_ndarray, dtype} An arbitrary length sequence of arrays or dtypes. Returns @@ -1161,11 +1365,6 @@ def result_type(*arrays_and_dtypes): out : dtype The result type. - Limitations - ----------- - An array in the input list is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Examples -------- >>> import dpnp as dp @@ -1197,20 +1396,27 @@ def roll(x, shift, axis=None): For full documentation refer to :obj:`numpy.roll`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + Returns ------- - dpnp.ndarray + out : dpnp.ndarray An array with the same data type as `x` and whose elements, relative to `x`, are shifted. - Limitations - ----------- - Parameter `x` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. Otherwise ``TypeError`` exception - will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. - - See Also -------- :obj:`dpnp.moveaxis` : Move array axes to new positions. @@ -1239,9 +1445,9 @@ def roll(x, shift, axis=None): """ if axis is None: return roll(x.reshape(-1), shift, 0).reshape(x.shape) - dpt_array = dpnp.get_usm_ndarray(x) + usm_array = dpnp.get_usm_ndarray(x) return dpnp_array._create_from_usm_ndarray( - dpt.roll(dpt_array, shift=shift, axis=axis) + dpt.roll(usm_array, shift=shift, axis=axis) ) @@ -1251,19 +1457,25 @@ def rollaxis(x, axis, start=0): For full documentation refer to :obj:`numpy.rollaxis`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input array. + axis : int + The axis to be rolled. The positions of the other axes do not + change relative to one another. + start : int, optional + When ``start <= axis``, the axis is rolled back until it lies in + this position. When ``start > axis``, the axis is rolled until it + lies before this position. The default, ``0``, results in a "complete" + roll. + Returns ------- - dpnp.ndarray + out : dpnp.ndarray An array with the same data type as `x` where the specified axis has been repositioned to the desired position. - Limitations - ----------- - Parameter `x` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. Otherwise ``TypeError`` exception - will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. - See Also -------- :obj:`dpnp.moveaxis` : Move array axes to new positions. 
@@ -1294,8 +1506,8 @@ def rollaxis(x, axis, start=0): start -= 1 if axis == start: return x - dpt_array = dpnp.get_usm_ndarray(x) - return dpnp.moveaxis(dpt_array, source=axis, destination=start) + usm_array = dpnp.get_usm_ndarray(x) + return dpnp.moveaxis(usm_array, source=axis, destination=start) def shape(a): @@ -1347,22 +1559,22 @@ def squeeze(a, /, axis=None): For full documentation refer to :obj:`numpy.squeeze`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input data. + axis : None or int or tuple of ints, optional + Selects a subset of the entries of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + Returns ------- out : dpnp.ndarray - Output array is a view, if possible, - and a copy otherwise, but with all or a subset of the - dimensions of length 1 removed. Output has the same data - type as the input, is allocated on the same device as the - input and has the same USM allocation type as the input - array `a`. - - Limitations - ----------- - Parameters `a` is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. - Otherwise ``TypeError`` exception will be raised. + Output array is a view, if possible, and a copy otherwise, but with all + or a subset of the dimensions of length 1 removed. Output has the same + data type as the input, is allocated on the same device as the input + and has the same USM allocation type as the input array `a`. Examples -------- @@ -1383,9 +1595,9 @@ def squeeze(a, /, axis=None): """ - dpt_array = dpnp.get_usm_ndarray(a) + usm_array = dpnp.get_usm_ndarray(a) return dpnp_array._create_from_usm_ndarray( - dpt.squeeze(dpt_array, axis=axis) + dpt.squeeze(usm_array, axis=axis) ) @@ -1418,6 +1630,10 @@ def stack(arrays, /, *, axis=0, out=None, dtype=None, casting="same_kind"): See Also -------- :obj:`dpnp.concatenate` : Join a sequence of arrays along an existing axis. + :obj:`dpnp.hstack` : Stack arrays in sequence horizontally (column wise). + :obj:`dpnp.vstack` : Stack arrays in sequence vertically (row wise). + :obj:`dpnp.dstack` : Stack arrays in sequence depth wise (along third dimension). + :obj:`dpnp.column_stack` : Stack 1-D arrays as columns into a 2-D array. :obj:`dpnp.block` : Assemble an nd-array from nested lists of blocks. :obj:`dpnp.split` : Split array into a list of multiple sub-arrays of equal size. @@ -1447,6 +1663,8 @@ def stack(arrays, /, *, axis=0, out=None, dtype=None, casting="same_kind"): """ + _check_stack_arrays(arrays) + if dtype is not None and out is not None: raise TypeError( "stack() only takes `out` or `dtype` as an argument, but both were provided." @@ -1469,19 +1687,21 @@ def swapaxes(a, axis1, axis2): For full documentation refer to :obj:`numpy.swapaxes`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + Returns ------- out : dpnp.ndarray An array with with swapped axes. A view is returned whenever possible. - Limitations - ----------- - Parameters `a` is supported either as :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - Input array data types are limited by supported DPNP :ref:`Data types`. - Otherwise ``TypeError`` exception will be raised. 
- Notes ----- If `a` has rank (i.e., number of dimensions) `N`, @@ -1510,9 +1730,9 @@ def swapaxes(a, axis1, axis2): """ - dpt_array = dpnp.get_usm_ndarray(a) + usm_array = dpnp.get_usm_ndarray(a) return dpnp_array._create_from_usm_ndarray( - dpt.swapaxes(dpt_array, axis1=axis1, axis2=axis2) + dpt.swapaxes(usm_array, axis1=axis1, axis2=axis2) ) @@ -1540,14 +1760,14 @@ def tile(A, reps): Parameters ---------- - A : dpnp.ndarray + A : {dpnp.ndarray, usm_ndarray} The input array. - reps : array_like + reps : int or tuple of ints The number of repetitions of `A` along each axis. Returns ------- - c : dpnp.ndarray + out : dpnp.ndarray The tiled output array. See Also @@ -1590,8 +1810,8 @@ def tile(A, reps): """ - dpt_array = dpnp.get_usm_ndarray(A) - return dpnp_array._create_from_usm_ndarray(dpt.tile(dpt_array, reps)) + usm_array = dpnp.get_usm_ndarray(A) + return dpnp_array._create_from_usm_ndarray(dpt.tile(usm_array, reps)) def transpose(a, axes=None): @@ -1600,16 +1820,22 @@ def transpose(a, axes=None): For full documentation refer to :obj:`numpy.transpose`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray} + Input array. + axes : tuple or list of ints, optional + If specified, it must be a tuple or list which contains a permutation + of [0, 1, ..., N-1] where N is the number of axes of `a`. + The `i`'th axis of the returned array will correspond to the axis + numbered ``axes[i]`` of the input. If not specified, defaults to + ``range(a.ndim)[::-1]``, which reverses the order of the axes. + Returns ------- - y : dpnp.ndarray + out : dpnp.ndarray `a` with its axes permuted. A view is returned whenever possible. - Limitations - ----------- - Input array is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. - See Also -------- :obj:`dpnp.ndarray.transpose` : Equivalent method. @@ -1680,6 +1906,9 @@ def vstack(tup, *, dtype=None, casting="same_kind"): """ Stack arrays in sequence vertically (row wise). + :obj:`dpnp.row_stack` is an alias for :obj:`dpnp.vstack`. + They are the same function. + For full documentation refer to :obj:`numpy.vstack`. Parameters @@ -1728,11 +1957,12 @@ def vstack(tup, *, dtype=None, casting="same_kind"): """ - if not hasattr(tup, "__getitem__"): - raise TypeError( - "Arrays to stack must be passed as a sequence type such as list or tuple." - ) + _check_stack_arrays(tup) + arrs = dpnp.atleast_2d(*tup) if not isinstance(arrs, list): arrs = [arrs] return dpnp.concatenate(arrs, axis=0, dtype=dtype, casting=casting) + + +row_stack = vstack diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 53e4e619091e..20c5c922dbee 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -2119,7 +2119,7 @@ def prod( du.intel_device_info(a.sycl_device).get("device_id", 0) & 0xFF00 ) if _any_complex and device_mask in [0x3E00, 0x9B00]: - return call_origin( + res = call_origin( numpy.prod, a, axis=axis, @@ -2129,6 +2129,10 @@ def prod( initial=initial, where=where, ) + if dpnp.isscalar(res): + # numpy may return a scalar, convert it back to dpnp array + return dpnp.array(res, sycl_queue=a.sycl_queue, usm_type=a.usm_type) + return res elif initial is not None: raise NotImplementedError( "initial keyword argument is only supported with its default value." 
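A minimal doctest-style sketch of the stacking helpers introduced above, assuming a dpnp build where `column_stack`, `dstack` and the `row_stack` alias are available; it shows the result shapes for two 1-D inputs:

    >>> import dpnp as np
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.column_stack((a, b)).shape  # 1-D inputs become columns
    (3, 2)
    >>> np.dstack((a, b)).shape  # joined along a new third axis
    (1, 3, 2)
    >>> np.row_stack((a, b)).shape  # alias of dpnp.vstack
    (2, 3)
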
diff --git a/tests/test_arraymanipulation.py b/tests/test_arraymanipulation.py index 03ebf0bab561..69116ef86921 100644 --- a/tests/test_arraymanipulation.py +++ b/tests/test_arraymanipulation.py @@ -5,7 +5,6 @@ assert_array_equal, assert_equal, assert_raises, - assert_warns, ) import dpnp @@ -13,117 +12,130 @@ from .helper import get_all_dtypes, get_float_complex_dtypes -@pytest.mark.parametrize("dtype", get_all_dtypes()) -@pytest.mark.parametrize( - "data", [[1, 2, 3], [1.0, 2.0, 3.0]], ids=["[1, 2, 3]", "[1., 2., 3.]"] -) -def test_asfarray(dtype, data): - expected = numpy.asfarray(data, dtype) - result = dpnp.asfarray(data, dtype) +class TestAtleast1d: + def test_0D_array(self): + a = dpnp.array(1) + b = dpnp.array(2) + res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] + desired = [dpnp.array([1]), dpnp.array([2])] + assert_array_equal(res, desired) - assert_array_equal(result, expected) + def test_1D_array(self): + a = dpnp.array([1, 2]) + b = dpnp.array([2, 3]) + res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] + desired = [dpnp.array([1, 2]), dpnp.array([2, 3])] + assert_array_equal(res, desired) + def test_2D_array(self): + a = dpnp.array([[1, 2], [1, 2]]) + b = dpnp.array([[2, 3], [2, 3]]) + res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) -@pytest.mark.parametrize("dtype", get_all_dtypes()) -@pytest.mark.parametrize("data", [[1.0, 2.0, 3.0]], ids=["[1., 2., 3.]"]) -@pytest.mark.parametrize("data_dtype", get_all_dtypes(no_none=True)) -def test_asfarray2(dtype, data, data_dtype): - expected = numpy.asfarray(numpy.array(data, dtype=data_dtype), dtype) - result = dpnp.asfarray(dpnp.array(data, dtype=data_dtype), dtype) + def test_3D_array(self): + a = dpnp.array([[1, 2], [1, 2]]) + b = dpnp.array([[2, 3], [2, 3]]) + a = dpnp.array([a, a]) + b = dpnp.array([b, b]) + res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) - assert_array_equal(result, expected) +class TestAtleast2d: + def test_0D_array(self): + a = dpnp.array(1) + b = dpnp.array(2) + res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] + desired = [dpnp.array([[1]]), dpnp.array([[2]])] + assert_array_equal(res, desired) -class TestDims: - @pytest.mark.parametrize("dt", get_all_dtypes()) - @pytest.mark.parametrize( - "sh", [(0,), (1,), (3,)], ids=["(0,)", "(1,)", "(3,)"] - ) - def test_broadcast_array(self, sh, dt): - np_a = numpy.array(0, dtype=dt) - dp_a = dpnp.array(0, dtype=dt) - func = lambda xp, a: xp.broadcast_to(a, sh) + def test_1D_array(self): + a = dpnp.array([1, 2]) + b = dpnp.array([2, 3]) + res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] + desired = [dpnp.array([[1, 2]]), dpnp.array([[2, 3]])] + assert_array_equal(res, desired) - assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + def test_2D_array(self): + a = dpnp.array([[1, 2], [1, 2]]) + b = dpnp.array([[2, 3], [2, 3]]) + res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) - @pytest.mark.parametrize("dt", get_all_dtypes()) - @pytest.mark.parametrize( - "sh", [(1,), (2,), (1, 2, 3)], ids=["(1,)", "(2,)", "(1, 2, 3)"] - ) - def test_broadcast_ones(self, sh, dt): - np_a = numpy.ones(1, dtype=dt) - dp_a = dpnp.ones(1, dtype=dt) - func = lambda xp, a: xp.broadcast_to(a, sh) + def test_3D_array(self): + a = dpnp.array([[1, 2], [1, 2]]) + b = dpnp.array([[2, 3], [2, 3]]) + a = dpnp.array([a, a]) + b = dpnp.array([b, b]) + res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) - 
assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) - @pytest.mark.parametrize("dt", get_all_dtypes(no_bool=True)) - @pytest.mark.parametrize( - "sh", [(3,), (1, 3), (2, 3)], ids=["(3,)", "(1, 3)", "(2, 3)"] - ) - def test_broadcast_arange(self, sh, dt): - np_a = numpy.arange(3, dtype=dt) - dp_a = dpnp.arange(3, dtype=dt) - func = lambda xp, a: xp.broadcast_to(a, sh) +class TestAtleast3d: + def test_0D_array(self): + a = dpnp.array(1) + b = dpnp.array(2) + res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] + desired = [dpnp.array([[[1]]]), dpnp.array([[[2]]])] + assert_array_equal(res, desired) - assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + def test_1D_array(self): + a = dpnp.array([1, 2]) + b = dpnp.array([2, 3]) + res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] + desired = [dpnp.array([[[1], [2]]]), dpnp.array([[[2], [3]]])] + assert_array_equal(res, desired) - @pytest.mark.parametrize("dt", get_all_dtypes()) - @pytest.mark.parametrize( - "sh1, sh2", - [ - pytest.param([0], [0], id="(0)"), - pytest.param([1], [1], id="(1)"), - pytest.param([1], [2], id="(2)"), - ], - ) - def test_broadcast_not_tuple(self, sh1, sh2, dt): - np_a = numpy.ones(sh1, dtype=dt) - dp_a = dpnp.ones(sh1, dtype=dt) - func = lambda xp, a: xp.broadcast_to(a, sh2) + def test_2D_array(self): + a = dpnp.array([[1, 2], [1, 2]]) + b = dpnp.array([[2, 3], [2, 3]]) + res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] + desired = [a[:, :, dpnp.newaxis], b[:, :, dpnp.newaxis]] + assert_array_equal(res, desired) - assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + def test_3D_array(self): + a = dpnp.array([[1, 2], [1, 2]]) + b = dpnp.array([[2, 3], [2, 3]]) + a = dpnp.array([a, a]) + b = dpnp.array([b, b]) + res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] + desired = [a, b] + assert_array_equal(res, desired) - @pytest.mark.parametrize("dt", get_all_dtypes()) - @pytest.mark.parametrize( - "sh1, sh2", - [ - pytest.param([1], (0,), id="(0,)"), - pytest.param((1, 2), (0, 2), id="(0, 2)"), - pytest.param((2, 1), (2, 0), id="(2, 0)"), - ], - ) - def test_broadcast_zero_shape(self, sh1, sh2, dt): - np_a = numpy.ones(sh1, dtype=dt) - dp_a = dpnp.ones(sh1, dtype=dt) - func = lambda xp, a: xp.broadcast_to(a, sh2) - assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) +class TestColumnStack: + def test_non_iterable(self): + with pytest.raises(TypeError): + dpnp.column_stack(1) @pytest.mark.parametrize( - "sh1, sh2", + "data1, data2", [ - pytest.param((0,), (), id="(0,)-()"), - pytest.param((1,), (), id="(1,)-()"), - pytest.param((3,), (), id="(3,)-()"), - pytest.param((3,), (1,), id="(3,)-(1,)"), - pytest.param((3,), (2,), id="(3,)-(2,)"), - pytest.param((3,), (4,), id="(3,)-(4,)"), - pytest.param((1, 2), (2, 1), id="(1, 2)-(2, 1)"), - pytest.param((1, 2), (1,), id="(1, 2)-(1,)"), - pytest.param((1,), -1, id="(1,)--1"), - pytest.param((1,), (-1,), id="(1,)-(-1,)"), - pytest.param((1, 2), (-1, 2), id="(1, 2)-(-1, 2)"), + pytest.param((1, 2, 3), (2, 3, 4), id="1D arrays"), + pytest.param([[1], [2], [3]], [[2], [3], [4]], id="2D arrays"), ], ) - def test_broadcast_raise(self, sh1, sh2): - np_a = numpy.zeros(sh1) - dp_a = dpnp.zeros(sh1) - func = lambda xp, a: xp.broadcast_to(a, sh2) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_1d_2d_arrays(self, data1, data2, dtype): + np_a = numpy.array(data1, dtype=dtype) + np_b = numpy.array(data2, dtype=dtype) + dp_a = dpnp.array(np_a, dtype=dtype) + dp_b = dpnp.array(np_b, dtype=dtype) - with pytest.raises(ValueError): - func(numpy, np_a) - func(dpnp, dp_a) + np_res = 
numpy.column_stack((np_a, np_b)) + dp_res = dpnp.column_stack((dp_a, dp_b)) + assert_array_equal(dp_res.asnumpy(), np_res) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.column_stack((dpnp.arange(3) for _ in range(2))) + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.column_stack(map(lambda x: x, dpnp.ones((3, 2)))) class TestConcatenate: @@ -344,6 +356,136 @@ def test_concatenate_casting(self, dtype, casting): assert_array_equal(dp_res.asnumpy(), np_res) + def test_concatenate_out_dtype(self): + x = dpnp.ones((5, 5)) + out = dpnp.empty_like(x) + with pytest.raises(TypeError): + dpnp.concatenate([x], out=out, dtype="i4") + + +class TestDims: + @pytest.mark.parametrize("dt", get_all_dtypes()) + @pytest.mark.parametrize( + "sh", [(0,), (1,), (3,)], ids=["(0,)", "(1,)", "(3,)"] + ) + def test_broadcast_array(self, sh, dt): + np_a = numpy.array(0, dtype=dt) + dp_a = dpnp.array(0, dtype=dt) + func = lambda xp, a: xp.broadcast_to(a, sh) + + assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + + @pytest.mark.parametrize("dt", get_all_dtypes()) + @pytest.mark.parametrize( + "sh", [(1,), (2,), (1, 2, 3)], ids=["(1,)", "(2,)", "(1, 2, 3)"] + ) + def test_broadcast_ones(self, sh, dt): + np_a = numpy.ones(1, dtype=dt) + dp_a = dpnp.ones(1, dtype=dt) + func = lambda xp, a: xp.broadcast_to(a, sh) + + assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + + @pytest.mark.parametrize("dt", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize( + "sh", [(3,), (1, 3), (2, 3)], ids=["(3,)", "(1, 3)", "(2, 3)"] + ) + def test_broadcast_arange(self, sh, dt): + np_a = numpy.arange(3, dtype=dt) + dp_a = dpnp.arange(3, dtype=dt) + func = lambda xp, a: xp.broadcast_to(a, sh) + + assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + + @pytest.mark.parametrize("dt", get_all_dtypes()) + @pytest.mark.parametrize( + "sh1, sh2", + [ + pytest.param([0], [0], id="(0)"), + pytest.param([1], [1], id="(1)"), + pytest.param([1], [2], id="(2)"), + ], + ) + def test_broadcast_not_tuple(self, sh1, sh2, dt): + np_a = numpy.ones(sh1, dtype=dt) + dp_a = dpnp.ones(sh1, dtype=dt) + func = lambda xp, a: xp.broadcast_to(a, sh2) + + assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + + @pytest.mark.parametrize("dt", get_all_dtypes()) + @pytest.mark.parametrize( + "sh1, sh2", + [ + pytest.param([1], (0,), id="(0,)"), + pytest.param((1, 2), (0, 2), id="(0, 2)"), + pytest.param((2, 1), (2, 0), id="(2, 0)"), + ], + ) + def test_broadcast_zero_shape(self, sh1, sh2, dt): + np_a = numpy.ones(sh1, dtype=dt) + dp_a = dpnp.ones(sh1, dtype=dt) + func = lambda xp, a: xp.broadcast_to(a, sh2) + + assert_allclose(func(numpy, np_a), func(dpnp, dp_a)) + + @pytest.mark.parametrize( + "sh1, sh2", + [ + pytest.param((0,), (), id="(0,)-()"), + pytest.param((1,), (), id="(1,)-()"), + pytest.param((3,), (), id="(3,)-()"), + pytest.param((3,), (1,), id="(3,)-(1,)"), + pytest.param((3,), (2,), id="(3,)-(2,)"), + pytest.param((3,), (4,), id="(3,)-(4,)"), + pytest.param((1, 2), (2, 1), id="(1, 2)-(2, 1)"), + pytest.param((1, 2), (1,), id="(1, 2)-(1,)"), + pytest.param((1,), -1, id="(1,)--1"), + pytest.param((1,), (-1,), id="(1,)-(-1,)"), + pytest.param((1, 2), (-1, 2), id="(1, 2)-(-1, 2)"), + ], + ) + def test_broadcast_raise(self, sh1, sh2): + np_a = numpy.zeros(sh1) + dp_a = dpnp.zeros(sh1) + func = lambda xp, a: xp.broadcast_to(a, sh2) + + with pytest.raises(ValueError): + func(numpy, np_a) + func(dpnp, dp_a) + + +class TestDstack: + def test_non_iterable(self): + with 
pytest.raises(TypeError): + dpnp.dstack(1) + + @pytest.mark.parametrize( + "data1, data2", + [ + pytest.param(1, 2, id="0D arrays"), + pytest.param([1], [2], id="1D arrays"), + pytest.param([[1], [2]], [[1], [2]], id="2D arrays"), + pytest.param([1, 2], [1, 2], id="1D arrays-2"), + ], + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_arrays(self, data1, data2, dtype): + np_a = numpy.array(data1, dtype=dtype) + np_b = numpy.array(data2, dtype=dtype) + dp_a = dpnp.array(np_a, dtype=dtype) + dp_b = dpnp.array(np_b, dtype=dtype) + + np_res = numpy.dstack([np_a, np_b]) + dp_res = dpnp.dstack([dp_a, dp_b]) + assert_array_equal(dp_res.asnumpy(), np_res) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.dstack((dpnp.arange(3) for _ in range(2))) + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.dstack(map(lambda x: x, dpnp.ones((3, 2)))) + class TestHstack: def test_non_iterable(self): @@ -374,9 +516,9 @@ def test_2D_array(self): assert_array_equal(res, desired) def test_generator(self): - with pytest.raises(TypeError): + with pytest.raises(TypeError, match="arrays to stack must be"): dpnp.hstack((dpnp.arange(3) for _ in range(2))) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match="arrays to stack must be"): dpnp.hstack(map(lambda x: x, dpnp.ones((3, 2)))) def test_one_element(self): @@ -385,6 +527,71 @@ def test_one_element(self): assert_array_equal(res, a) +class TestRollaxis: + data = [ + (0, 0), + (0, 1), + (0, 2), + (0, 3), + (0, 4), + (1, 0), + (1, 1), + (1, 2), + (1, 3), + (1, 4), + (2, 0), + (2, 1), + (2, 2), + (2, 3), + (2, 4), + (3, 0), + (3, 1), + (3, 2), + (3, 3), + (3, 4), + ] + + @pytest.mark.parametrize( + ("axis", "start"), + [ + (-5, 0), + (0, -5), + (4, 0), + (0, 5), + ], + ) + def test_exceptions(self, axis, start): + a = dpnp.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) + assert_raises(ValueError, dpnp.rollaxis, a, axis, start) + + def test_results(self): + np_a = numpy.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) + dp_a = dpnp.array(np_a) + for i, j in self.data: + # positive axis, positive start + res = dpnp.rollaxis(dp_a, axis=i, start=j) + exp = numpy.rollaxis(np_a, axis=i, start=j) + assert res.shape == exp.shape + + # negative axis, positive start + ip = i + 1 + res = dpnp.rollaxis(dp_a, axis=-ip, start=j) + exp = numpy.rollaxis(np_a, axis=-ip, start=j) + assert res.shape == exp.shape + + # positive axis, negative start + jp = j + 1 if j < 4 else j + res = dpnp.rollaxis(dp_a, axis=i, start=-jp) + exp = numpy.rollaxis(np_a, axis=i, start=-jp) + assert res.shape == exp.shape + + # negative axis, negative start + ip = i + 1 + jp = j + 1 if j < 4 else j + res = dpnp.rollaxis(dp_a, axis=-ip, start=-jp) + exp = numpy.rollaxis(np_a, axis=-ip, start=-jp) + + class TestStack: def test_non_iterable_input(self): with pytest.raises(TypeError): @@ -573,6 +780,18 @@ def test_invalid_casting_dtype(self, arr_dtype, dtype): dtype=dtype, ) + def test_stack_out_dtype(self): + x = dpnp.ones((5, 5)) + out = dpnp.empty_like(x) + with pytest.raises(TypeError): + dpnp.stack([x], out=out, dtype="i4") + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.stack((dpnp.arange(3) for _ in range(2))) + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.stack(map(lambda x: x, dpnp.ones((3, 2)))) + class TestVstack: def test_non_iterable(self): @@ -610,169 +829,31 @@ def test_2D_array2(self): assert_array_equal(res, desired) def 
test_generator(self): - with pytest.raises(TypeError): + with pytest.raises(TypeError, match="arrays to stack must be"): dpnp.vstack((dpnp.arange(3) for _ in range(2))) + with pytest.raises(TypeError, match="arrays to stack must be"): + dpnp.vstack(map(lambda x: x, dpnp.ones((3, 2)))) -class TestAtleast1d: - def test_0D_array(self): - a = dpnp.array(1) - b = dpnp.array(2) - res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] - desired = [dpnp.array([1]), dpnp.array([2])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = dpnp.array([1, 2]) - b = dpnp.array([2, 3]) - res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] - desired = [dpnp.array([1, 2]), dpnp.array([2, 3])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = dpnp.array([[1, 2], [1, 2]]) - b = dpnp.array([[2, 3], [2, 3]]) - res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = dpnp.array([[1, 2], [1, 2]]) - b = dpnp.array([[2, 3], [2, 3]]) - a = dpnp.array([a, a]) - b = dpnp.array([b, b]) - res = [dpnp.atleast_1d(a), dpnp.atleast_1d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - -class TestRollaxis: - data = [ - (0, 0), - (0, 1), - (0, 2), - (0, 3), - (0, 4), - (1, 0), - (1, 1), - (1, 2), - (1, 3), - (1, 4), - (2, 0), - (2, 1), - (2, 2), - (2, 3), - (2, 4), - (3, 0), - (3, 1), - (3, 2), - (3, 3), - (3, 4), - ] - - @pytest.mark.parametrize( - ("axis", "start"), - [ - (-5, 0), - (0, -5), - (4, 0), - (0, 5), - ], - ) - def test_exceptions(self, axis, start): - a = dpnp.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) - assert_raises(ValueError, dpnp.rollaxis, a, axis, start) - - def test_results(self): - np_a = numpy.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) - dp_a = dpnp.array(np_a) - for i, j in self.data: - # positive axis, positive start - res = dpnp.rollaxis(dp_a, axis=i, start=j) - exp = numpy.rollaxis(np_a, axis=i, start=j) - assert res.shape == exp.shape - - # negative axis, positive start - ip = i + 1 - res = dpnp.rollaxis(dp_a, axis=-ip, start=j) - exp = numpy.rollaxis(np_a, axis=-ip, start=j) - assert res.shape == exp.shape - - # positive axis, negative start - jp = j + 1 if j < 4 else j - res = dpnp.rollaxis(dp_a, axis=i, start=-jp) - exp = numpy.rollaxis(np_a, axis=i, start=-jp) - assert res.shape == exp.shape - - # negative axis, negative start - ip = i + 1 - jp = j + 1 if j < 4 else j - res = dpnp.rollaxis(dp_a, axis=-ip, start=-jp) - exp = numpy.rollaxis(np_a, axis=-ip, start=-jp) - - -class TestAtleast2d: - def test_0D_array(self): - a = dpnp.array(1) - b = dpnp.array(2) - res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] - desired = [dpnp.array([[1]]), dpnp.array([[2]])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = dpnp.array([1, 2]) - b = dpnp.array([2, 3]) - res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] - desired = [dpnp.array([[1, 2]]), dpnp.array([[2, 3]])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = dpnp.array([[1, 2], [1, 2]]) - b = dpnp.array([[2, 3], [2, 3]]) - res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = dpnp.array([[1, 2], [1, 2]]) - b = dpnp.array([[2, 3], [2, 3]]) - a = dpnp.array([a, a]) - b = dpnp.array([b, b]) - res = [dpnp.atleast_2d(a), dpnp.atleast_2d(b)] - desired = [a, b] - assert_array_equal(res, desired) - +@pytest.mark.parametrize("dtype", get_all_dtypes()) +@pytest.mark.parametrize( + "data", [[1, 2, 3], [1.0, 2.0, 3.0]], ids=["[1, 2, 
3]", "[1., 2., 3.]"] +) +def test_asfarray(dtype, data): + expected = numpy.asfarray(data, dtype) + result = dpnp.asfarray(data, dtype) -class TestAtleast3d: - def test_0D_array(self): - a = dpnp.array(1) - b = dpnp.array(2) - res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] - desired = [dpnp.array([[[1]]]), dpnp.array([[[2]]])] - assert_array_equal(res, desired) + assert_array_equal(result, expected) - def test_1D_array(self): - a = dpnp.array([1, 2]) - b = dpnp.array([2, 3]) - res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] - desired = [dpnp.array([[[1], [2]]]), dpnp.array([[[2], [3]]])] - assert_array_equal(res, desired) - def test_2D_array(self): - a = dpnp.array([[1, 2], [1, 2]]) - b = dpnp.array([[2, 3], [2, 3]]) - res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] - desired = [a[:, :, dpnp.newaxis], b[:, :, dpnp.newaxis]] - assert_array_equal(res, desired) +@pytest.mark.parametrize("dtype", get_all_dtypes()) +@pytest.mark.parametrize("data", [[1.0, 2.0, 3.0]], ids=["[1., 2., 3.]"]) +@pytest.mark.parametrize("data_dtype", get_all_dtypes(no_none=True)) +def test_asfarray2(dtype, data, data_dtype): + expected = numpy.asfarray(numpy.array(data, dtype=data_dtype), dtype) + result = dpnp.asfarray(dpnp.array(data, dtype=data_dtype), dtype) - def test_3D_array(self): - a = dpnp.array([[1, 2], [1, 2]]) - b = dpnp.array([[2, 3], [2, 3]]) - a = dpnp.array([a, a]) - b = dpnp.array([b, b]) - res = [dpnp.atleast_3d(a), dpnp.atleast_3d(b)] - desired = [a, b] - assert_array_equal(res, desired) + assert_array_equal(result, expected) def assert_broadcast_correct(input_shapes): @@ -1045,17 +1126,3 @@ def test_repeat_strided_repeats(): res = dpnp.repeat(x, reps) assert res.shape == x.shape assert dpnp.all(res == x) - - -def test_concatenate_out_dtype(): - x = dpnp.ones((5, 5)) - out = dpnp.empty_like(x) - with pytest.raises(TypeError): - dpnp.concatenate([x], out=out, dtype="i4") - - -def test_stack_out_dtype(): - x = dpnp.ones((5, 5)) - out = dpnp.empty_like(x) - with pytest.raises(TypeError): - dpnp.stack([x], out=out, dtype="i4") diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 31196e2b3dae..855ae2651dc2 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -1290,6 +1290,38 @@ def test_broadcast_to(device): assert_sycl_queue_equal(x.sycl_queue, y.sycl_queue) +@pytest.mark.parametrize( + "func,data1,data2", + [ + pytest.param("column_stack", (1, 2, 3), (2, 3, 4)), + pytest.param("concatenate", [[1, 2], [3, 4]], [[5, 6]]), + pytest.param("dstack", [[1], [2], [3]], [[2], [3], [4]]), + pytest.param("hstack", (1, 2, 3), (4, 5, 6)), + pytest.param("row_stack", [[7], [1], [2], [3]], [[2], [3], [9], [4]]), + pytest.param("stack", [1, 2, 3], [4, 5, 6]), + pytest.param("vstack", [0, 1, 2, 3], [4, 5, 6, 7]), + ], +) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_concat_stack(func, data1, data2, device): + x1_orig = numpy.array(data1) + x2_orig = numpy.array(data2) + expected = getattr(numpy, func)((x1_orig, x2_orig)) + + x1 = dpnp.array(data1, device=device) + x2 = dpnp.array(data2, device=device) + result = getattr(dpnp, func)((x1, x2)) + + assert_allclose(result, expected) + + assert_sycl_queue_equal(result.sycl_queue, x1.sycl_queue) + assert_sycl_queue_equal(result.sycl_queue, x2.sycl_queue) + + @pytest.mark.parametrize( "device_x", valid_devices, diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index c69671e5bb0b..9ceb04c59a09 100644 --- a/tests/test_usm_type.py +++ 
b/tests/test_usm_type.py @@ -480,6 +480,30 @@ def test_broadcast_to(usm_type): assert x.usm_type == y.usm_type +@pytest.mark.parametrize( + "func,data1,data2", + [ + pytest.param("column_stack", (1, 2, 3), (2, 3, 4)), + pytest.param("concatenate", [[1, 2], [3, 4]], [[5, 6]]), + pytest.param("dstack", [[1], [2], [3]], [[2], [3], [4]]), + pytest.param("hstack", (1, 2, 3), (4, 5, 6)), + pytest.param("row_stack", [[7], [1], [2], [3]], [[2], [3], [9], [4]]), + pytest.param("stack", [1, 2, 3], [4, 5, 6]), + pytest.param("vstack", [0, 1, 2, 3], [4, 5, 6, 7]), + ], +) +@pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) +@pytest.mark.parametrize("usm_type_y", list_of_usm_types, ids=list_of_usm_types) +def test_concat_stack(func, data1, data2, usm_type_x, usm_type_y): + x = dp.array(data1, usm_type=usm_type_x) + y = dp.array(data2, usm_type=usm_type_y) + z = getattr(dp, func)((x, y)) + + assert x.usm_type == usm_type_x + assert y.usm_type == usm_type_y + assert z.usm_type == du.get_coerced_usm_type([usm_type_x, usm_type_y]) + + @pytest.mark.parametrize("func", ["take", "take_along_axis"]) @pytest.mark.parametrize("usm_type_x", list_of_usm_types, ids=list_of_usm_types) @pytest.mark.parametrize( diff --git a/tests/third_party/cupy/manipulation_tests/test_join.py b/tests/third_party/cupy/manipulation_tests/test_join.py index 6f8eb33fa27d..72340355c19d 100644 --- a/tests/third_party/cupy/manipulation_tests/test_join.py +++ b/tests/third_party/cupy/manipulation_tests/test_join.py @@ -1,38 +1,33 @@ -import unittest - import numpy import pytest import dpnp as cupy +from tests.helper import has_support_aspect64 from tests.third_party.cupy import testing -class TestJoin(unittest.TestCase): - @pytest.mark.skip("dpnp.column_stack() is not implemented yet") +class TestJoin: @testing.for_all_dtypes(name="dtype1") @testing.for_all_dtypes(name="dtype2") - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_column_stack(self, xp, dtype1, dtype2): a = testing.shaped_arange((4, 3), xp, dtype1) b = testing.shaped_arange((4,), xp, dtype2) c = testing.shaped_arange((4, 2), xp, dtype1) return xp.column_stack((a, b, c)) - @pytest.mark.skip("dpnp.column_stack() is not implemented yet") def test_column_stack_wrong_ndim1(self): a = cupy.zeros(()) b = cupy.zeros((3,)) with pytest.raises(ValueError): cupy.column_stack((a, b)) - @pytest.mark.skip("dpnp.column_stack() is not implemented yet") def test_column_stack_wrong_ndim2(self): a = cupy.zeros((3, 2, 3)) b = cupy.zeros((3, 2)) with pytest.raises(ValueError): cupy.column_stack((a, b)) - @pytest.mark.skip("dpnp.column_stack() is not implemented yet") def test_column_stack_wrong_shape(self): a = cupy.zeros((3, 2)) b = cupy.zeros((4, 3)) @@ -87,9 +82,8 @@ def test_concatenate_large_4(self, xp, dtype): b = testing.shaped_reverse_arange((2, 3, 4), xp, dtype) return xp.concatenate((a, b) * 10, axis=-1) - @pytest.mark.skip("TODO: remove once dpctl #1325 is resolved") @testing.for_all_dtypes(name="dtype") - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_concatenate_large_5(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = testing.shaped_reverse_arange((2, 3, 4), xp, "i") @@ -113,8 +107,8 @@ def test_concatenate_large_f_contiguous(self, xp, dtype): e = testing.shaped_arange((2, 3, 2), xp, dtype) return xp.concatenate((a, b, c, d, e) * 2, axis=-1) - @pytest.mark.skip("TODO: remove once dpctl #1325 is resolved") - 
@testing.numpy_cupy_array_equal() + @pytest.mark.skip(reason="lead to crash due to reported issue in OCL RT") + @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_concatenate_many_multi_dtype(self, xp): a = testing.shaped_arange((2, 1), xp, "i") b = testing.shaped_arange((2, 1), xp, "f") @@ -191,9 +185,8 @@ def test_concatenate_out_invalid_dtype(self): with pytest.raises(TypeError): xp.concatenate((a, b, c), axis=1, out=out) - @pytest.mark.skip("TODO: remove once dpctl #1325 is resolved") @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_concatenate_different_dtype(self, xp, dtype1, dtype2): a = testing.shaped_arange((3, 4), xp, dtype1) b = testing.shaped_arange((3, 4), xp, dtype2) @@ -235,7 +228,6 @@ def test_concatenate_casting(self, xp, dtype1, dtype2, casting): b = testing.shaped_arange((3, 4), xp, dtype1) return xp.concatenate((a, b), dtype=dtype2, casting=casting) - @pytest.mark.skip("dpnp.dstack() is not implemented yet") @testing.numpy_cupy_array_equal() def test_dstack(self, xp): a = testing.shaped_arange((1, 3, 2), xp) @@ -243,19 +235,16 @@ def test_dstack(self, xp): c = testing.shaped_arange((1, 3), xp) return xp.dstack((a, b, c)) - @pytest.mark.skip("dpnp.dstack() is not implemented yet") @testing.numpy_cupy_array_equal() def test_dstack_single_element(self, xp): a = testing.shaped_arange((1, 2, 3), xp) return xp.dstack((a,)) - @pytest.mark.skip("dpnp.dstack() is not implemented yet") @testing.numpy_cupy_array_equal() def test_dstack_single_element_2(self, xp): a = testing.shaped_arange((1, 2), xp) return xp.dstack((a,)) - @pytest.mark.skip("dpnp.dstack() is not implemented yet") @testing.numpy_cupy_array_equal() def test_dstack_single_element_3(self, xp): a = testing.shaped_arange((1,), xp) @@ -473,33 +462,30 @@ def test_stack_dtype(self, xp, dtype1, dtype2): def test_stack_casting(self, xp, dtype1, dtype2, casting): a = testing.shaped_arange((3, 4), xp, dtype1) b = testing.shaped_arange((3, 4), xp, dtype1) + # may raise TypeError or ComplexWarning return xp.stack((a, b), dtype=dtype2, casting=casting) - @pytest.mark.skip("dpnp.row_stack() is not implemented yet") @testing.for_all_dtypes(name="dtype1") @testing.for_all_dtypes(name="dtype2") - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_row_stack(self, xp, dtype1, dtype2): a = testing.shaped_arange((4, 3), xp, dtype1) b = testing.shaped_arange((3,), xp, dtype2) c = testing.shaped_arange((2, 3), xp, dtype1) return xp.row_stack((a, b, c)) - @pytest.mark.skip("dpnp.row_stack() is not implemented yet") def test_row_stack_wrong_ndim1(self): a = cupy.zeros(()) b = cupy.zeros((3,)) with pytest.raises(ValueError): cupy.row_stack((a, b)) - @pytest.mark.skip("dpnp.row_stack() is not implemented yet") def test_row_stack_wrong_ndim2(self): a = cupy.zeros((3, 2, 3)) b = cupy.zeros((3, 2)) with pytest.raises(ValueError): cupy.row_stack((a, b)) - @pytest.mark.skip("dpnp.row_stack() is not implemented yet") def test_row_stack_wrong_shape(self): a = cupy.zeros((3, 2)) b = cupy.zeros((4, 3)) From 20513fb1516fb5b76226daeb7826231ea67e7100 Mon Sep 17 00:00:00 2001 From: vtavana <120411540+vtavana@users.noreply.github.com> Date: Fri, 22 Dec 2023 12:55:43 -0600 Subject: [PATCH 37/38] implement `dpnp.logsumexp` and `dpnp.reduce_hypot` (#1648) * implement logsumexp and reduce_hypot * fix pre-commit * address comments --- 
doc/reference/math.rst | 2 + dpnp/dpnp_iface_trigonometric.py | 157 +++++++++++++++++++++++++++++++ tests/helper.py | 13 ++- tests/test_mathematical.py | 84 +++++++++++++++++ tests/test_strides.py | 34 +++++-- tests/test_sycl_queue.py | 32 +++++++ tests/test_usm_type.py | 6 +- 7 files changed, 314 insertions(+), 14 deletions(-) diff --git a/doc/reference/math.rst b/doc/reference/math.rst index 7eb6534fa799..430ce5b6de67 100644 --- a/doc/reference/math.rst +++ b/doc/reference/math.rst @@ -23,6 +23,7 @@ Trigonometric functions dpnp.unwrap dpnp.deg2rad dpnp.rad2deg + dpnp.reduce_hypot Hyperbolic functions @@ -94,6 +95,7 @@ Exponents and logarithms dpnp.log1p dpnp.logaddexp dpnp.logaddexp2 + dpnp.logsumexp Other special functions diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index 5b6447831dfa..fd5e4c1407e6 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -40,10 +40,12 @@ """ +import dpctl.tensor as dpt import numpy import dpnp from dpnp.dpnp_algo import * +from dpnp.dpnp_array import dpnp_array from dpnp.dpnp_utils import * from .dpnp_algo.dpnp_elementwise_common import ( @@ -98,9 +100,11 @@ "log1p", "log2", "logaddexp", + "logsumexp", "rad2deg", "radians", "reciprocal", + "reduce_hypot", "rsqrt", "sin", "sinh", @@ -989,6 +993,10 @@ def hypot( Otherwise the function will be executed sequentially on CPU. Input array data types are limited by supported real-valued data types. + See Also + -------- + :obj:`dpnp.reduce_hypot` : The square root of the sum of squares of elements in the input array. + Examples -------- >>> import dpnp as np @@ -1303,6 +1311,7 @@ def logaddexp( -------- :obj:`dpnp.log` : Natural logarithm, element-wise. :obj:`dpnp.exp` : Exponential, element-wise. + :obj:`dpnp.logsumexp` : Logarithm of the sum of exponentials of elements in the input array. Examples -------- @@ -1331,6 +1340,81 @@ def logaddexp( ) + +def logsumexp(x, axis=None, out=None, dtype=None, keepdims=False): + """ + Calculates the logarithm of the sum of exponentials of elements in the input array. + + Parameters + ---------- + x : {dpnp_array, usm_ndarray} + Input array, expected to have a real-valued data type. + axis : int or tuple of ints, optional + Axis or axes along which values must be computed. If a tuple + of unique integers, values are computed over multiple axes. + If ``None``, the result is computed over the entire array. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + dtype : data type, optional + Data type of the returned array. If ``None``, the default data + type is inferred from the "kind" of the input array data type. + * If `x` has a real-valued floating-point data type, + the returned array will have the default real-valued + floating-point data type for the device where input + array `x` is allocated. + * If `x` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `x` is allocated. + * If `x` has a complex-valued floating-point data type, + an error is raised. + If the data type (either specified or resolved) differs from the + data type of `x`, the input array elements are cast to the + specified data type before computing the result. Default: ``None``. 
+ keepdims : bool + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input arrays according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + An array containing the results. If the result was computed over + the entire array, a zero-dimensional array is returned. The returned + array has the data type as described in the `dtype` parameter + description above. + + Note + ---- + This function is equivalent to `numpy.logaddexp.reduce`. + + See Also + -------- + :obj:`dpnp.log` : Natural logarithm, element-wise. + :obj:`dpnp.exp` : Exponential, element-wise. + :obj:`dpnp.logaddexp` : Logarithm of the sum of exponentiations of the inputs, element-wise. + + Examples + -------- + >>> import dpnp as np + >>> a = np.ones(10) + >>> np.logsumexp(a) + array(3.30258509) + >>> np.log(np.sum(np.exp(a))) + array(3.30258509) + + """ + + dpt_array = dpnp.get_usm_ndarray(x) + result = dpnp_array._create_from_usm_ndarray( + dpt.logsumexp(dpt_array, axis=axis, dtype=dtype, keepdims=keepdims) + ) + + return dpnp.get_result_array(result, out, casting="same_kind") + + def reciprocal(x1, **kwargs): """ Return the reciprocal of the argument, element-wise. @@ -1363,6 +1447,79 @@ def reciprocal(x1, **kwargs): return call_origin(numpy.reciprocal, x1, **kwargs) +def reduce_hypot(x, axis=None, out=None, dtype=None, keepdims=False): + """ + Calculates the square root of the sum of squares of elements in the input array. + + Parameters + ---------- + x : {dpnp_array, usm_ndarray} + Input array, expected to have a real-valued data type. + axis : int or tuple of ints, optional + Axis or axes along which values must be computed. If a tuple + of unique integers, values are computed over multiple axes. + If ``None``, the result is computed over the entire array. + Default: ``None``. + out : {dpnp_array, usm_ndarray}, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + dtype : data type, optional + Data type of the returned array. If ``None``, the default data + type is inferred from the "kind" of the input array data type. + * If `x` has a real-valued floating-point data type, + the returned array will have the default real-valued + floating-point data type for the device where input + array `x` is allocated. + * If `x` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `x` is allocated. + * If `x` has a complex-valued floating-point data type, + an error is raised. + If the data type (either specified or resolved) differs from the + data type of `x`, the input array elements are cast to the + specified data type before computing the result. Default: ``None``. + keepdims : bool + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input arrays according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + An array containing the results. If the result was computed over + the entire array, a zero-dimensional array is returned. 
The returned + array has the data type as described in the `dtype` parameter + description above. + + Note + ---- + This function is equivalent to `numpy.hypot.reduce`. + + See Also + -------- + :obj:`dpnp.hypot` : Given the "legs" of a right triangle, return its hypotenuse. + + Examples + -------- + >>> import dpnp as np + >>> a = np.ones(10) + >>> np.reduce_hypot(a) + array(3.16227766) + >>> np.sqrt(np.sum(np.square(a))) + array(3.16227766) + + """ + + dpt_array = dpnp.get_usm_ndarray(x) + result = dpnp_array._create_from_usm_ndarray( + dpt.reduce_hypot(dpt_array, axis=axis, dtype=dtype, keepdims=keepdims) + ) + + return dpnp.get_result_array(result, out, casting="same_kind") + + def rsqrt( x, /, diff --git a/tests/helper.py b/tests/helper.py index 8fa26116756d..aac6b51a1c62 100644 --- a/tests/helper.py +++ b/tests/helper.py @@ -34,10 +34,17 @@ def assert_dtype_allclose( list_64bit_types = [numpy.float64, numpy.complex128] is_inexact = lambda x: dpnp.issubdtype(x.dtype, dpnp.inexact) if is_inexact(dpnp_arr) or is_inexact(numpy_arr): - tol = 8 * max( - dpnp.finfo(dpnp_arr).resolution, - numpy.finfo(numpy_arr.dtype).resolution, + tol_dpnp = ( + dpnp.finfo(dpnp_arr).resolution + if is_inexact(dpnp_arr) + else -dpnp.inf ) + tol_numpy = ( + numpy.finfo(numpy_arr.dtype).resolution + if is_inexact(numpy_arr) + else -dpnp.inf + ) + tol = 8 * max(tol_dpnp, tol_numpy) assert_allclose(dpnp_arr.asnumpy(), numpy_arr, atol=tol, rtol=tol) if check_type: numpy_arr_dtype = numpy_arr.dtype diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index af3e5fd79fef..15ca60908681 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1752,6 +1752,90 @@ def test_invalid_out(self, out): assert_raises(TypeError, numpy.hypot, a.asnumpy(), 2, out) +class TestLogSumExp: + @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) + @pytest.mark.parametrize("axis", [None, 2, -1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + def test_logsumexp(self, dtype, axis, keepdims): + a = dpnp.ones((3, 4, 5, 6, 7), dtype=dtype) + res = dpnp.logsumexp(a, axis=axis, keepdims=keepdims) + exp_dtype = dpnp.default_float_type(a.device) + exp = numpy.logaddexp.reduce( + dpnp.asnumpy(a), axis=axis, keepdims=keepdims, dtype=exp_dtype + ) + + assert_dtype_allclose(res, exp) + + @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) + @pytest.mark.parametrize("axis", [None, 2, -1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + def test_logsumexp_out(self, dtype, axis, keepdims): + a = dpnp.ones((3, 4, 5, 6, 7), dtype=dtype) + exp_dtype = dpnp.default_float_type(a.device) + exp = numpy.logaddexp.reduce( + dpnp.asnumpy(a), axis=axis, keepdims=keepdims, dtype=exp_dtype + ) + dpnp_out = dpnp.empty(exp.shape, dtype=exp_dtype) + res = dpnp.logsumexp(a, axis=axis, out=dpnp_out, keepdims=keepdims) + + assert res is dpnp_out + assert_dtype_allclose(res, exp) + + @pytest.mark.parametrize( + "in_dtype", get_all_dtypes(no_bool=True, no_complex=True) + ) + @pytest.mark.parametrize("out_dtype", get_all_dtypes(no_bool=True)) + def test_logsumexp_dtype(self, in_dtype, out_dtype): + a = dpnp.ones(100, dtype=in_dtype) + res = dpnp.logsumexp(a, dtype=out_dtype) + exp = numpy.logaddexp.reduce(dpnp.asnumpy(a)) + exp = exp.astype(out_dtype) + + assert_allclose(res, exp, rtol=1e-06) + + +class TestReduceHypot: + @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) + @pytest.mark.parametrize("axis", [None, 2, -1, (0, 1)]) + 
@pytest.mark.parametrize("keepdims", [True, False]) + def test_reduce_hypot(self, dtype, axis, keepdims): + a = dpnp.ones((3, 4, 5, 6, 7), dtype=dtype) + res = dpnp.reduce_hypot(a, axis=axis, keepdims=keepdims) + exp_dtype = dpnp.default_float_type(a.device) + exp = numpy.hypot.reduce( + dpnp.asnumpy(a), axis=axis, keepdims=keepdims, dtype=exp_dtype + ) + + assert_dtype_allclose(res, exp) + + @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) + @pytest.mark.parametrize("axis", [None, 2, -1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + def test_reduce_hypot_out(self, dtype, axis, keepdims): + a = dpnp.ones((3, 4, 5, 6, 7), dtype=dtype) + exp_dtype = dpnp.default_float_type(a.device) + exp = numpy.hypot.reduce( + dpnp.asnumpy(a), axis=axis, keepdims=keepdims, dtype=exp_dtype + ) + dpnp_out = dpnp.empty(exp.shape, dtype=exp_dtype) + res = dpnp.reduce_hypot(a, axis=axis, out=dpnp_out, keepdims=keepdims) + + assert res is dpnp_out + assert_dtype_allclose(res, exp) + + @pytest.mark.parametrize( + "in_dtype", get_all_dtypes(no_bool=True, no_complex=True) + ) + @pytest.mark.parametrize("out_dtype", get_all_dtypes(no_bool=True)) + def test_reduce_hypot_dtype(self, in_dtype, out_dtype): + a = dpnp.ones(99, dtype=in_dtype) + res = dpnp.reduce_hypot(a, dtype=out_dtype) + exp = numpy.hypot.reduce(dpnp.asnumpy(a)) + exp = exp.astype(out_dtype) + + assert_allclose(res, exp, rtol=1e-06) + + class TestMaximum: @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True)) def test_maximum(self, dtype): diff --git a/tests/test_strides.py b/tests/test_strides.py index 071c92a8a35c..903341f10006 100644 --- a/tests/test_strides.py +++ b/tests/test_strides.py @@ -6,7 +6,7 @@ import dpnp -from .helper import get_all_dtypes +from .helper import assert_dtype_allclose, get_all_dtypes def _getattr(ex, str_): @@ -99,17 +99,33 @@ def test_strides_1arg(func_name, dtype, shape): @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) -def test_strides_rsqrt(dtype): - a = numpy.arange(1, 11, dtype=dtype) - b = a[::2] +def test_rsqrt(dtype): + a = numpy.arange(1, 11, dtype=dtype)[::2] + dpa = dpnp.arange(1, 11, dtype=dtype)[::2] - dpa = dpnp.arange(1, 11, dtype=dtype) - dpb = dpa[::2] + result = dpnp.rsqrt(dpa) + expected = 1 / numpy.sqrt(a) + assert_dtype_allclose(result, expected) - result = dpnp.rsqrt(dpb) - expected = 1 / numpy.sqrt(b) - assert_allclose(result, expected, rtol=1e-06) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) +def test_logsumexp(dtype): + a = numpy.arange(10, dtype=dtype)[::2] + dpa = dpnp.arange(10, dtype=dtype)[::2] + + result = dpnp.logsumexp(dpa) + expected = numpy.logaddexp.reduce(a) + assert_allclose(result, expected) + + +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True, no_complex=True)) +def test_reduce_hypot(dtype): + a = numpy.arange(10, dtype=dtype)[::2] + dpa = dpnp.arange(10, dtype=dtype)[::2] + + result = dpnp.reduce_hypot(dpa) + expected = numpy.hypot.reduce(a) + assert_allclose(result, expected) @pytest.mark.parametrize( diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 855ae2651dc2..fb31bd59ebfe 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -470,6 +470,38 @@ def test_rsqrt(device): assert_sycl_queue_equal(result_queue, expected_queue) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_logsumexp(device): + x = dpnp.arange(10, device=device) + result = 
dpnp.logsumexp(x) + expected = numpy.logaddexp.reduce(x.asnumpy()) + assert_dtype_allclose(result, expected) + + expected_queue = x.get_array().sycl_queue + result_queue = result.get_array().sycl_queue + assert_sycl_queue_equal(result_queue, expected_queue) + + +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +def test_reduce_hypot(device): + x = dpnp.arange(10, device=device) + result = dpnp.reduce_hypot(x) + expected = numpy.hypot.reduce(x.asnumpy()) + assert_dtype_allclose(result, expected) + + expected_queue = x.get_array().sycl_queue + result_queue = result.get_array().sycl_queue + assert_sycl_queue_equal(result_queue, expected_queue) + + @pytest.mark.parametrize( "func,data1,data2", [ diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 9ceb04c59a09..3b5bbbbe6964 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -394,8 +394,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("log10", [1.0, 2.0, 4.0, 7.0]), pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), - pytest.param("nanprod", [1.0, 2.0, dp.nan]), - pytest.param("nanvar", [1.0, 2.0, 4.0, dp.nan]), + pytest.param("logsumexp", [1.0, 2.0, 4.0, 7.0]), pytest.param("max", [1.0, 2.0, 4.0, 7.0]), pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), @@ -403,6 +402,8 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("nanargmin", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanmax", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanmin", [1.0, 2.0, 4.0, dp.nan]), + pytest.param("nanprod", [1.0, 2.0, dp.nan]), + pytest.param("nanvar", [1.0, 2.0, 4.0, dp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), @@ -411,6 +412,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param( "real", [complex(1.0, 2.0), complex(3.0, 4.0), complex(5.0, 6.0)] ), + pytest.param("reduce_hypot", [1.0, 2.0, 4.0, 7.0]), pytest.param("rsqrt", [1, 8, 27]), pytest.param("sign", [-5.0, 0.0, 4.5]), pytest.param("signbit", [-5.0, 0.0, 4.5]), From 5b25163eea9568ab1fe7b66ef6c30dce3c697179 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Tue, 9 Jan 2024 18:06:11 +0100 Subject: [PATCH 38/38] Resolve compilation warnings (#1651) * Resolved compilation warnings * Explicit instantiation of dpnp_max_c and dpnp_min_c --- .../kernels/dpnp_krnl_arraycreation.cpp | 14 +- dpnp/backend/kernels/dpnp_krnl_indexing.cpp | 98 ++++++----- dpnp/backend/kernels/dpnp_krnl_linalg.cpp | 82 +++++---- dpnp/backend/kernels/dpnp_krnl_sorting.cpp | 6 +- dpnp/backend/kernels/dpnp_krnl_statistics.cpp | 158 +++++++++++++----- 5 files changed, 224 insertions(+), 134 deletions(-) diff --git a/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp b/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp index b1af79e019d0..90c3d9a1ee53 100644 --- a/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2023, Intel Corporation +// Copyright (c) 2016-2024, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -855,11 +855,12 @@ DPCTLSyclEventRef dpnp_tril_c(DPCTLSyclQueueRef q_ref, _DataType *array_m = input1_ptr.get_ptr(); _DataType *result = result_ptr.get_ptr(); + int *ids = new int[res_ndim]; + if (ndim == 1) { for (size_t i = 0; i < res_size; ++i) { size_t n = res_size; size_t val = i; - int ids[res_ndim]; for (size_t j = 0; j < res_ndim; ++j) { n /= res_shape[j]; size_t p = val / n; @@ -886,7 +887,6 @@ DPCTLSyclEventRef dpnp_tril_c(DPCTLSyclQueueRef q_ref, for (size_t i = 0; i < res_size; ++i) { size_t n = res_size; size_t val = i; - int ids[res_ndim]; for (size_t j = 0; j < res_ndim; ++j) { n /= res_shape[j]; size_t p = val / n; @@ -909,6 +909,8 @@ DPCTLSyclEventRef dpnp_tril_c(DPCTLSyclQueueRef q_ref, } } } + + delete[] ids; return DPCTLEvent_Copy(event_ref); } @@ -989,11 +991,12 @@ DPCTLSyclEventRef dpnp_triu_c(DPCTLSyclQueueRef q_ref, _DataType *array_m = input1_ptr.get_ptr(); _DataType *result = result_ptr.get_ptr(); + int *ids = new int[res_ndim]; + if (ndim == 1) { for (size_t i = 0; i < res_size; ++i) { size_t n = res_size; size_t val = i; - int ids[res_ndim]; for (size_t j = 0; j < res_ndim; ++j) { n /= res_shape[j]; size_t p = val / n; @@ -1020,7 +1023,6 @@ DPCTLSyclEventRef dpnp_triu_c(DPCTLSyclQueueRef q_ref, for (size_t i = 0; i < res_size; ++i) { size_t n = res_size; size_t val = i; - int ids[res_ndim]; for (size_t j = 0; j < res_ndim; ++j) { n /= res_shape[j]; size_t p = val / n; @@ -1043,6 +1045,8 @@ DPCTLSyclEventRef dpnp_triu_c(DPCTLSyclQueueRef q_ref, } } } + + delete[] ids; return DPCTLEvent_Copy(event_ref); } diff --git a/dpnp/backend/kernels/dpnp_krnl_indexing.cpp b/dpnp/backend/kernels/dpnp_krnl_indexing.cpp index e9addf36b707..7dc35fb5a803 100644 --- a/dpnp/backend/kernels/dpnp_krnl_indexing.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_indexing.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2023, Intel Corporation +// Copyright (c) 2016-2024, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -233,18 +233,14 @@ DPCTLSyclEventRef dpnp_diagonal_c(DPCTLSyclQueueRef q_ref, continue; } else { - size_t ind_input_size = ind_list.size() + 2; - size_t ind_input_[ind_input_size]; - ind_input_[0] = i; - ind_input_[1] = i + offset; - size_t ind_output_size = ind_list.size() + 1; - size_t ind_output_[ind_output_size]; - for (size_t k = 0; k < ind_list.size(); k++) { - ind_input_[k + 2] = ind_list.at(k); - ind_output_[k] = ind_list.at(k); - } - ind_output_[ind_list.size()] = i; + std::vector ind_input_{i, i + offset}; + ind_input_.insert(ind_input_.end(), ind_list.begin(), + ind_list.end()); + + std::vector ind_output_ = ind_list; + ind_output_.push_back(i); + const size_t ind_output_size = ind_output_.size(); size_t ind_output = 0; size_t n = 1; for (size_t k = 0; k < ind_output_size; k++) { @@ -253,6 +249,7 @@ DPCTLSyclEventRef dpnp_diagonal_c(DPCTLSyclQueueRef q_ref, n *= res_shape[ind]; } + const size_t ind_input_size = ind_input_.size(); size_t ind_input = 0; size_t m = 1; for (size_t k = 0; k < ind_input_size; k++) { @@ -423,11 +420,13 @@ DPCTLSyclEventRef dpnp_nonzero_c(DPCTLSyclQueueRef q_ref, long *result = result_ptr.get_ptr(); size_t idx = 0; + size_t *ids = new size_t[ndim]; + for (size_t i = 0; i < input1_size; ++i) { if (arr[i] != 0) { - size_t ids[ndim]; size_t ind1 = input1_size; size_t ind2 = i; + for (size_t k = 0; k < ndim; ++k) { ind1 = ind1 / shape[k]; ids[k] = ind2 / ind1; @@ -438,6 +437,7 @@ DPCTLSyclEventRef dpnp_nonzero_c(DPCTLSyclQueueRef q_ref, idx += 1; } } + delete[] ids; return event_ref; } @@ -621,8 +621,6 @@ DPCTLSyclEventRef DPCTLSyclEventRef event_ref = nullptr; sycl::queue q = *(reinterpret_cast(q_ref)); - size_t res_ndim = ndim - 1; - size_t res_shape[res_ndim]; const size_t size_arr = std::accumulate(shape, shape + ndim, 1, std::multiplies()); @@ -635,14 +633,14 @@ DPCTLSyclEventRef _DataType *values = input2_ptr.get_ptr(); _DataType *arr = result_ptr.get_ptr(); - if (axis != res_ndim) { - int ind = 0; + if (axis != (ndim - 1)) { + std::vector res_shape; for (size_t i = 0; i < ndim; i++) { if (axis != i) { - res_shape[ind] = shape[i]; - ind++; + res_shape.push_back(shape[i]); } } + size_t res_ndim = res_shape.size(); size_t prod = 1; for (size_t i = 0; i < res_ndim; ++i) { @@ -651,12 +649,13 @@ DPCTLSyclEventRef } } - size_t ind_array[prod]; - bool bool_ind_array[prod]; + size_t *ind_array = new size_t[prod]; + bool *bool_ind_array = new bool[prod]; for (size_t i = 0; i < prod; ++i) { bool_ind_array[i] = true; } - size_t arr_shape_offsets[ndim]; + + size_t *arr_shape_offsets = new size_t[ndim]; size_t acc = 1; for (size_t i = ndim - 1; i > 0; --i) { arr_shape_offsets[i] = acc; @@ -664,7 +663,7 @@ DPCTLSyclEventRef } arr_shape_offsets[0] = acc; - size_t output_shape_offsets[res_ndim]; + size_t *output_shape_offsets = new size_t[res_ndim]; acc = 1; if (res_ndim > 0) { for (size_t i = res_ndim - 1; i > 0; --i) { @@ -680,31 +679,31 @@ DPCTLSyclEventRef } // init result array + size_t *xyz = new size_t[res_ndim]; for (size_t result_idx = 0; result_idx < size_result; ++result_idx) { - size_t xyz[res_ndim]; size_t remainder = result_idx; for (size_t i = 0; i < res_ndim; ++i) { xyz[i] = remainder / output_shape_offsets[i]; remainder = remainder - xyz[i] * output_shape_offsets[i]; } - size_t source_axis[ndim]; - size_t result_axis_idx = 0; - for (size_t idx = 0; idx < ndim; ++idx) { - bool found = false; - if (axis == idx) { - found = true; - } - if (found) { - source_axis[idx] = 0; - } - 
else { - source_axis[idx] = xyz[result_axis_idx]; - result_axis_idx++; - } - } + // FIXME: computed and unused. Commented out per compiler warning + // size_t source_axis[ndim]; + // size_t result_axis_idx = 0; + // for (size_t idx = 0; idx < ndim; ++idx) { + // bool found = false; + // if (axis == idx) { + // found = true; + // } + // if (found) { + // source_axis[idx] = 0; + // } + // else { + // source_axis[idx] = xyz[result_axis_idx]; + // result_axis_idx++; + // } + // } - // FIXME: computed, but unused. Commented out per compiler warning // size_t source_idx = 0; // for (size_t i = 0; i < static_cast(ndim); ++i) // { @@ -714,7 +713,6 @@ DPCTLSyclEventRef for (size_t source_idx = 0; source_idx < size_arr; ++source_idx) { // reconstruct x,y,z from linear source_idx - size_t xyz[ndim]; size_t remainder = source_idx; for (size_t i = 0; i < ndim; ++i) { xyz[i] = remainder / arr_shape_offsets[i]; @@ -722,17 +720,11 @@ DPCTLSyclEventRef } // extract result axis - size_t result_axis[res_ndim]; - size_t result_idx = 0; + std::vector result_axis; for (size_t idx = 0; idx < ndim; ++idx) { // try to find current idx in axis array - bool found = false; - if (axis == idx) { - found = true; - } - if (!found) { - result_axis[result_idx] = xyz[idx]; - result_idx++; + if (axis != idx) { + result_axis.push_back(xyz[idx]); } } @@ -756,6 +748,12 @@ DPCTLSyclEventRef arr[source_idx] = values[source_idx % values_size]; } } + + delete[] ind_array; + delete[] bool_ind_array; + delete[] arr_shape_offsets; + delete[] output_shape_offsets; + delete[] xyz; } else { for (size_t i = 0; i < size_arr; ++i) { diff --git a/dpnp/backend/kernels/dpnp_krnl_linalg.cpp b/dpnp/backend/kernels/dpnp_krnl_linalg.cpp index 5bf54c2b84d3..5e78a7cda176 100644 --- a/dpnp/backend/kernels/dpnp_krnl_linalg.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_linalg.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2023, Intel Corporation +// Copyright (c) 2016-2024, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -172,50 +172,52 @@ DPCTLSyclEventRef dpnp_det_c(DPCTLSyclQueueRef q_ref, _DataType *array_1 = input1_ptr.get_ptr(); _DataType *result = result_ptr.get_ptr(); + _DataType *matrix = new _DataType[n * n]; + _DataType *elems = new _DataType[n * n]; + for (size_t i = 0; i < size_out; i++) { - _DataType matrix[n][n]; if (size_out > 1) { - _DataType elems[n * n]; for (size_t j = i * n * n; j < (i + 1) * n * n; j++) { elems[j - i * n * n] = array_1[j]; } for (size_t j = 0; j < n; j++) { for (size_t k = 0; k < n; k++) { - matrix[j][k] = elems[j * n + k]; + matrix[j * n + k] = elems[j * n + k]; } } } else { for (size_t j = 0; j < n; j++) { for (size_t k = 0; k < n; k++) { - matrix[j][k] = array_1[j * n + k]; + matrix[j * n + k] = array_1[j * n + k]; } } } _DataType det_val = 1; for (size_t l = 0; l < n; l++) { - if (matrix[l][l] == 0) { + if (matrix[l * n + l] == 0) { for (size_t j = l; j < n; j++) { - if (matrix[j][l] != 0) { + if (matrix[j * n + l] != 0) { for (size_t k = l; k < n; k++) { - _DataType c = matrix[l][k]; - matrix[l][k] = -1 * matrix[j][k]; - matrix[j][k] = c; + _DataType c = matrix[l * n + k]; + matrix[l * n + k] = -1 * matrix[j * n + k]; + matrix[j * n + k] = c; } break; } - if (j == n - 1 and matrix[j][l] == 0) { + if (j == n - 1 and matrix[j * n + l] == 0) { det_val = 0; } } } if (det_val != 0) { for (size_t j = l + 1; j < n; j++) { - _DataType quotient = -(matrix[j][l] / matrix[l][l]); + _DataType quotient = + -(matrix[j * n + l] / matrix[l * n + l]); for (size_t k = l + 1; k < n; k++) { - matrix[j][k] += quotient * matrix[l][k]; + matrix[j * n + k] += quotient * matrix[l * n + k]; } } } @@ -223,13 +225,15 @@ DPCTLSyclEventRef dpnp_det_c(DPCTLSyclQueueRef q_ref, if (det_val != 0) { for (size_t l = 0; l < n; l++) { - det_val *= matrix[l][l]; + det_val *= matrix[l * n + l]; } } result[i] = det_val; } + delete[] elems; + delete[] matrix; return event_ref; } @@ -291,50 +295,50 @@ DPCTLSyclEventRef dpnp_inv_c(DPCTLSyclQueueRef q_ref, size_t n = shape[0]; - _ResultType a_arr[n][n]; - _ResultType e_arr[n][n]; + _ResultType *a_arr = new _ResultType[n * n]; + _ResultType *e_arr = new _ResultType[n * n]; for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < n; ++j) { - a_arr[i][j] = array_1[i * n + j]; + a_arr[i * n + j] = array_1[i * n + j]; if (i == j) { - e_arr[i][j] = 1; + e_arr[i * n + j] = 1; } else { - e_arr[i][j] = 0; + e_arr[i * n + j] = 0; } } } for (size_t k = 0; k < n; ++k) { - if (a_arr[k][k] == 0) { + if (a_arr[k * n + k] == 0) { for (size_t i = k; i < n; ++i) { - if (a_arr[i][k] != 0) { + if (a_arr[i * n + k] != 0) { for (size_t j = 0; j < n; ++j) { - float c = a_arr[k][j]; - a_arr[k][j] = a_arr[i][j]; - a_arr[i][j] = c; - float c_e = e_arr[k][j]; - e_arr[k][j] = e_arr[i][j]; - e_arr[i][j] = c_e; + float c = a_arr[k * n + j]; + a_arr[k * n + j] = a_arr[i * n + j]; + a_arr[i * n + j] = c; + float c_e = e_arr[k * n + j]; + e_arr[k * n + j] = e_arr[i * n + j]; + e_arr[i * n + j] = c_e; } break; } } } - float temp = a_arr[k][k]; + float temp = a_arr[k * n + k]; for (size_t j = 0; j < n; ++j) { - a_arr[k][j] = a_arr[k][j] / temp; - e_arr[k][j] = e_arr[k][j] / temp; + a_arr[k * n + j] = a_arr[k * n + j] / temp; + e_arr[k * n + j] = e_arr[k * n + j] / temp; } for (size_t i = k + 1; i < n; ++i) { - temp = a_arr[i][k]; + temp = a_arr[i * n + k]; for (size_t j = 0; j < n; j++) { - a_arr[i][j] = a_arr[i][j] - a_arr[k][j] * temp; - e_arr[i][j] = e_arr[i][j] - e_arr[k][j] * temp; + a_arr[i * n + j] = a_arr[i * 
n + j] - a_arr[k * n + j] * temp; + e_arr[i * n + j] = e_arr[i * n + j] - e_arr[k * n + j] * temp; } } } @@ -344,20 +348,24 @@ DPCTLSyclEventRef dpnp_inv_c(DPCTLSyclQueueRef q_ref, for (size_t i = 0; i < ind_k; ++i) { size_t ind_i = ind_k - 1 - i; - float temp = a_arr[ind_i][ind_k]; + float temp = a_arr[ind_i * n + ind_k]; for (size_t j = 0; j < n; ++j) { - a_arr[ind_i][j] = a_arr[ind_i][j] - a_arr[ind_k][j] * temp; - e_arr[ind_i][j] = e_arr[ind_i][j] - e_arr[ind_k][j] * temp; + a_arr[ind_i * n + j] = + a_arr[ind_i * n + j] - a_arr[ind_k * n + j] * temp; + e_arr[ind_i * n + j] = + e_arr[ind_i * n + j] - e_arr[ind_k * n + j] * temp; } } } for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < n; ++j) { - result[i * n + j] = e_arr[i][j]; + result[i * n + j] = e_arr[i * n + j]; } } + delete[] a_arr; + delete[] e_arr; return event_ref; } diff --git a/dpnp/backend/kernels/dpnp_krnl_sorting.cpp b/dpnp/backend/kernels/dpnp_krnl_sorting.cpp index 8a5a8ea69aaf..ac4992466e2f 100644 --- a/dpnp/backend/kernels/dpnp_krnl_sorting.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_sorting.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2023, Intel Corporation +// Copyright (c) 2016-2024, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -192,11 +192,12 @@ DPCTLSyclEventRef dpnp_partition_c(DPCTLSyclQueueRef q_ref, auto arr_to_result_event = q.memcpy(result, arr, size * sizeof(_DataType)); arr_to_result_event.wait(); + _DataType *matrix = new _DataType[shape_[ndim - 1]]; + for (size_t i = 0; i < size_; ++i) { size_t ind_begin = i * shape_[ndim - 1]; size_t ind_end = (i + 1) * shape_[ndim - 1] - 1; - _DataType matrix[shape_[ndim - 1]]; for (size_t j = ind_begin; j < ind_end + 1; ++j) { size_t ind = j - ind_begin; matrix[ind] = arr2[j]; @@ -242,6 +243,7 @@ DPCTLSyclEventRef dpnp_partition_c(DPCTLSyclQueueRef q_ref, event.wait(); + delete[] matrix; sycl::free(shape, q); return event_ref; diff --git a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp index 8f685c97cb38..97df3c2d7f11 100644 --- a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp @@ -358,9 +358,7 @@ DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, } } else { - size_t res_ndim = ndim - naxis; - size_t res_shape[res_ndim]; - int ind = 0; + std::vector res_shape; for (size_t i = 0; i < ndim; ++i) { bool found = false; for (size_t j = 0; j < naxis; ++j) { @@ -370,28 +368,24 @@ DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, } } if (!found) { - res_shape[ind] = shape[i]; - ind++; + res_shape.push_back(shape[i]); } } + const size_t res_ndim = res_shape.size(); - size_t input_shape_offsets[ndim]; size_t acc = 1; - for (size_t i = ndim - 1; i > 0; --i) { - input_shape_offsets[i] = acc; + std::vector input_shape_offsets{acc}; + for (size_t i = ndim - 2; i > 0; --i) { acc *= shape[i]; + input_shape_offsets.insert(input_shape_offsets.begin(), acc); } - input_shape_offsets[0] = acc; - size_t output_shape_offsets[res_ndim]; acc = 1; - if (res_ndim > 0) { - for (size_t i = res_ndim - 1; i > 0; --i) { - output_shape_offsets[i] = acc; - acc *= res_shape[i]; - } + std::vector output_shape_offsets{acc}; + for (size_t i = res_ndim - 2; i > 0; --i) { + acc *= res_shape[i]; + output_shape_offsets.insert(output_shape_offsets.begin(), acc); } - output_shape_offsets[0] = acc; size_t size_result = 1; for (size_t i = 0; i < res_ndim; 
++i) { @@ -399,15 +393,16 @@ DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, } // init result array + size_t *xyz = new size_t[res_ndim]; + size_t *source_axis = new size_t[ndim]; + size_t *result_axis = new size_t[res_ndim]; for (size_t result_idx = 0; result_idx < size_result; ++result_idx) { - size_t xyz[res_ndim]; size_t remainder = result_idx; for (size_t i = 0; i < res_ndim; ++i) { xyz[i] = remainder / output_shape_offsets[i]; remainder = remainder - xyz[i] * output_shape_offsets[i]; } - size_t source_axis[ndim]; size_t result_axis_idx = 0; for (size_t idx = 0; idx < ndim; ++idx) { bool found = false; @@ -436,7 +431,6 @@ DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, for (size_t source_idx = 0; source_idx < size_input; ++source_idx) { // reconstruct x,y,z from linear source_idx - size_t xyz[ndim]; size_t remainder = source_idx; for (size_t i = 0; i < ndim; ++i) { xyz[i] = remainder / input_shape_offsets[i]; @@ -444,7 +438,6 @@ DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, } // extract result axis - size_t result_axis[res_ndim]; size_t result_idx = 0; for (size_t idx = 0; idx < ndim; ++idx) { // try to find current idx in axis array @@ -471,11 +464,57 @@ DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, result[result_offset] = array_1[source_idx]; } } + + delete[] xyz; + delete[] source_axis; + delete[] result_axis; } return event_ref; } +// Explicit instantiation of the function, since dpnp_max_c() is used by +// other template functions, but implicit instantiation is not applied anymore. +template DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + +template DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + +template DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + +template DPCTLSyclEventRef dpnp_max_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + template void dpnp_max_c(void *array1_in, void *result1, @@ -730,9 +769,7 @@ DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, } } else { - size_t res_ndim = ndim - naxis; - size_t res_shape[res_ndim]; - int ind = 0; + std::vector res_shape; for (size_t i = 0; i < ndim; i++) { bool found = false; for (size_t j = 0; j < naxis; j++) { @@ -742,28 +779,24 @@ DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, } } if (!found) { - res_shape[ind] = shape[i]; - ind++; + res_shape.push_back(shape[i]); } } + const size_t res_ndim = res_shape.size(); - size_t input_shape_offsets[ndim]; size_t acc = 1; - for (size_t i = ndim - 1; i > 0; --i) { - input_shape_offsets[i] = acc; + std::vector input_shape_offsets{acc}; + for (size_t i = ndim - 2; i > 0; --i) { acc *= shape[i]; + input_shape_offsets.insert(input_shape_offsets.begin(), acc); } - input_shape_offsets[0] = acc; - size_t output_shape_offsets[res_ndim]; acc = 1; - if (res_ndim > 0) { - for (size_t i = res_ndim - 1; i > 0; --i) { - output_shape_offsets[i] = acc; - acc *= res_shape[i]; - } + std::vector output_shape_offsets{acc}; + for (size_t i = res_ndim - 2; i > 0; --i) { + acc *= res_shape[i]; + 
output_shape_offsets.insert(output_shape_offsets.begin(), acc); } - output_shape_offsets[0] = acc; size_t size_result = 1; for (size_t i = 0; i < res_ndim; ++i) { @@ -771,15 +804,16 @@ DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, } // init result array + size_t *xyz = new size_t[res_ndim]; + size_t *source_axis = new size_t[ndim]; + size_t *result_axis = new size_t[res_ndim]; for (size_t result_idx = 0; result_idx < size_result; ++result_idx) { - size_t xyz[res_ndim]; size_t remainder = result_idx; for (size_t i = 0; i < res_ndim; ++i) { xyz[i] = remainder / output_shape_offsets[i]; remainder = remainder - xyz[i] * output_shape_offsets[i]; } - size_t source_axis[ndim]; size_t result_axis_idx = 0; for (size_t idx = 0; idx < ndim; ++idx) { bool found = false; @@ -808,7 +842,6 @@ DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, for (size_t source_idx = 0; source_idx < size_input; ++source_idx) { // reconstruct x,y,z from linear source_idx - size_t xyz[ndim]; size_t remainder = source_idx; for (size_t i = 0; i < ndim; ++i) { xyz[i] = remainder / input_shape_offsets[i]; @@ -816,7 +849,6 @@ DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, } // extract result axis - size_t result_axis[res_ndim]; size_t result_idx = 0; for (size_t idx = 0; idx < ndim; ++idx) { // try to find current idx in axis array @@ -843,11 +875,57 @@ DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, result[result_offset] = array_1[source_idx]; } } + + delete[] xyz; + delete[] source_axis; + delete[] result_axis; } return event_ref; } +// Explicit instantiation of the function, since dpnp_min_c() is used by +// other template functions, but implicit instantiation is not applied anymore. +template DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + +template DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + +template DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + +template DPCTLSyclEventRef dpnp_min_c(DPCTLSyclQueueRef q_ref, + void *, + void *, + const size_t, + const shape_elem_type *, + size_t, + const shape_elem_type *, + size_t, + const DPCTLEventVectorRef); + template void dpnp_min_c(void *array1_in, void *result1,
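
A note on the USM-type behaviour exercised by the test_concat_stack case added to tests/test_usm_type.py above: dpctl coerces the allocation type of a result from the allocation types of the inputs, and the test asserts exactly that. The following is a minimal sketch of the rule, assuming dpnp and dpctl are installed; the choice of hstack and of the "host"/"device" allocation types is illustrative only, not part of the patch.

# Sketch of the usm_type coercion rule checked by test_concat_stack.
# get_coerced_usm_type() is the dpctl.utils helper the test imports as `du`.
import dpnp
from dpctl.utils import get_coerced_usm_type

x = dpnp.array([1, 2, 3], usm_type="host")
y = dpnp.array([4, 5, 6], usm_type="device")
z = dpnp.hstack((x, y))

# Inputs keep their own allocation types; the result follows the coercion
# order "device" > "shared" > "host".
assert x.usm_type == "host"
assert y.usm_type == "device"
assert z.usm_type == get_coerced_usm_type([x.usm_type, y.usm_type])  # "device"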
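For PATCH 37/38, a minimal usage sketch of the two new reductions, dpnp.logsumexp and dpnp.reduce_hypot, compared against the NumPy reference reductions used in the new tests (numpy.logaddexp.reduce and numpy.hypot.reduce). The array shape, dtype, and axis values are illustrative; a default SYCL device is assumed to be available.

# Sketch: the new reductions versus their NumPy ufunc-reduce counterparts.
import numpy
import dpnp

a = dpnp.arange(1, 11, dtype=dpnp.float32).reshape(2, 5)

# log(sum(exp(a))) along axis 1, without overflowing intermediate exponentials
lse = dpnp.logsumexp(a, axis=1, keepdims=True)
ref_lse = numpy.logaddexp.reduce(dpnp.asnumpy(a), axis=1, keepdims=True)

# sqrt(sum(a**2)) over the whole array, as a hypot-style reduction
hyp = dpnp.reduce_hypot(a)
ref_hyp = numpy.hypot.reduce(dpnp.asnumpy(a), axis=None)

numpy.testing.assert_allclose(dpnp.asnumpy(lse), ref_lse, rtol=1e-6)
numpy.testing.assert_allclose(dpnp.asnumpy(hyp), ref_hyp, rtol=1e-6)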
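The tests/helper.py hunk above changes assert_dtype_allclose so that, when only one of the two arrays has an inexact dtype, the tolerance is taken solely from that array (previously numpy.finfo() was applied to the NumPy array unconditionally). Below is a reduced, standalone sketch of just that tolerance selection; _tolerance is a hypothetical name used only for illustration and is not part of the helper.

# Sketch of the tolerance selection after the helper.py change:
# a non-inexact operand contributes -inf, so max() picks the resolution
# of the inexact operand.
import numpy
import dpnp

def _tolerance(dpnp_arr, numpy_arr):
    is_inexact = lambda x: dpnp.issubdtype(x.dtype, dpnp.inexact)
    if not (is_inexact(dpnp_arr) or is_inexact(numpy_arr)):
        return 0  # both exact dtypes: compare exactly
    tol_dpnp = (
        dpnp.finfo(dpnp_arr).resolution if is_inexact(dpnp_arr) else -dpnp.inf
    )
    tol_numpy = (
        numpy.finfo(numpy_arr.dtype).resolution
        if is_inexact(numpy_arr)
        else -dpnp.inf
    )
    return 8 * max(tol_dpnp, tol_numpy)

# e.g. a float32 dpnp result checked against an int64 NumPy reference:
# the tolerance comes from float32 only.
tol = _tolerance(dpnp.ones(3, dtype=dpnp.float32), numpy.ones(3, dtype=numpy.int64))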
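The new test_sycl_queue cases also assert compute-follows-data semantics for the added reductions: the result is allocated on the same SYCL queue, and therefore the same device, as the input. A short sketch of that check, assuming at least one SYCL device is present; the "cpu" filter string is only an example, and direct queue comparison here stands in for the assert_sycl_queue_equal() helper used in the tests.

# Sketch: the result of dpnp.logsumexp lives on the same queue as its input.
import dpnp

x = dpnp.arange(10, device="cpu")  # device selector is illustrative
res = dpnp.logsumexp(x)

assert res.sycl_queue == x.sycl_queue
assert res.sycl_device == x.sycl_device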