From d59f6c0edc73169853f1a2407e11f21e7f42b19a Mon Sep 17 00:00:00 2001 From: Nikita Grigorian Date: Tue, 4 Jul 2023 02:40:58 -0700 Subject: [PATCH 1/2] Implements log2 and log10 --- dpctl/tensor/__init__.py | 4 + dpctl/tensor/_elementwise_funcs.py | 42 +++- .../kernels/elementwise_functions/log10.hpp | 224 ++++++++++++++++++ .../kernels/elementwise_functions/log2.hpp | 220 +++++++++++++++++ .../source/elementwise_functions.cpp | 110 ++++++++- dpctl/tests/elementwise/test_log10.py | 133 +++++++++++ dpctl/tests/elementwise/test_log2.py | 129 ++++++++++ 7 files changed, 856 insertions(+), 6 deletions(-) create mode 100644 dpctl/tensor/libtensor/include/kernels/elementwise_functions/log10.hpp create mode 100644 dpctl/tensor/libtensor/include/kernels/elementwise_functions/log2.hpp create mode 100644 dpctl/tests/elementwise/test_log10.py create mode 100644 dpctl/tests/elementwise/test_log2.py diff --git a/dpctl/tensor/__init__.py b/dpctl/tensor/__init__.py index ec488cb3d4..253187722f 100644 --- a/dpctl/tensor/__init__.py +++ b/dpctl/tensor/__init__.py @@ -111,6 +111,8 @@ less_equal, log, log1p, + log2, + log10, logical_and, logical_not, logical_or, @@ -220,6 +222,8 @@ "logical_or", "logical_xor", "log1p", + "log2", + "log10", "proj", "real", "sin", diff --git a/dpctl/tensor/_elementwise_funcs.py b/dpctl/tensor/_elementwise_funcs.py index 2c07ab8e6a..bdd34b58f5 100644 --- a/dpctl/tensor/_elementwise_funcs.py +++ b/dpctl/tensor/_elementwise_funcs.py @@ -563,10 +563,48 @@ ) # U22: ==== LOG2 (x) -# FIXME: implement U22 +_log2_docstring_ = """ +log2(x, out=None, order='K') +Computes the base 2 logarithm element-wise. +Args: + x (usm_ndarray): + Input array, expected to have numeric data type. + out (usm_ndarray): + Output array to populate. Array must have the correct + shape and the expected data type. + order ("C","F","A","K", optional): memory layout of the new + output array, if parameter `out` is `None`. + Default: "K". +Return: + usm_ndarray: + An array containing the element-wise base 2 logarithm values. +""" + +log2 = UnaryElementwiseFunc( + "log2", ti._log2_result_type, ti._log2, _log2_docstring_ +) # U23: ==== LOG10 (x) -# FIXME: implement U23 +_log10_docstring_ = """ +log10(x, out=None, order='K') +Computes the base 10 logarithm element-wise. +Args: + x (usm_ndarray): + Input array, expected to have numeric data type. + out (usm_ndarray): + Output array to populate. Array must have the correct + shape and the expected data type. + order ("C","F","A","K", optional): memory layout of the new + output array, if parameter `out` is `None`. + Default: "K". +Return: + usm_ndarray: + An array containing the element-wise base 10 logarithm values. +""" + +log10 = UnaryElementwiseFunc( + "log10", ti._log10_result_type, ti._log10, _log10_docstring_ +) # B15: ==== LOGADDEXP (x1, x2) # FIXME: implement B15 diff --git a/dpctl/tensor/libtensor/include/kernels/elementwise_functions/log10.hpp b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/log10.hpp new file mode 100644 index 0000000000..6b1185416f --- /dev/null +++ b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/log10.hpp @@ -0,0 +1,224 @@ +//=== log10.hpp - Unary function LOG10 ------ +//*-C++-*--/===// +// +// Data Parallel Control (dpctl) +// +// Copyright 2020-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of LOG10(x) function. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include + +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" +#include + +namespace dpctl +{ +namespace tensor +{ +namespace kernels +{ +namespace log10 +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; +using dpctl::tensor::type_utils::vec_cast; + +template struct Log10Functor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::negation< + std::disjunction, is_complex>>; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) + { + if constexpr (is_complex::value) { + using realT = typename argT::value_type; + return (std::log(in) / std::log(realT{10})); + } + else { + return std::log10(in); + } + } + + template + sycl::vec operator()(const sycl::vec &in) + { + auto const &res_vec = sycl::log10(in); + using deducedT = typename std::remove_cv_t< + std::remove_reference_t>::element_type; + if constexpr (std::is_same_v) { + return res_vec; + } + else { + return vec_cast(res_vec); + } + } +}; + +template +using Log10ContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs>; + +template +using Log10StridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template struct Log10OutputType +{ + using value_type = typename std::disjunction< // disjunction is C++17 + // feature, supported by DPC++ + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, std::complex>, + td_ns:: + TypeMapResultEntry, std::complex>, + td_ns::DefaultResultEntry>::result_type; +}; + +typedef sycl::event (*log10_contig_impl_fn_ptr_t)( + sycl::queue, + size_t, + const char *, + char *, + const std::vector &); + +template +class log10_contig_kernel; + +template +sycl::event log10_contig_impl(sycl::queue exec_q, + size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + return elementwise_common::unary_contig_impl< + argTy, Log10OutputType, Log10ContigFunctor, log10_contig_kernel>( + exec_q, nelems, arg_p, res_p, depends); +} + +template struct Log10ContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = log10_contig_impl; + return fn; + } + } +}; + +template struct Log10TypeMapFactory +{ + /*! 
@brief get typeid for output type of std::log10(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename Log10OutputType::value_type; + ; + return td_ns::GetTypeid{}.get(); + } +}; + +template class log10_strided_kernel; + +typedef sycl::event (*log10_strided_impl_fn_ptr_t)( + sycl::queue, + size_t, + int, + const py::ssize_t *, + const char *, + py::ssize_t, + char *, + py::ssize_t, + const std::vector &, + const std::vector &); + +template +sycl::event +log10_strided_impl(sycl::queue exec_q, + size_t nelems, + int nd, + const py::ssize_t *shape_and_strides, + const char *arg_p, + py::ssize_t arg_offset, + char *res_p, + py::ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, Log10OutputType, Log10StridedFunctor, log10_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template struct Log10StridedFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = log10_strided_impl; + return fn; + } + } +}; + +} // namespace log10 +} // namespace kernels +} // namespace tensor +} // namespace dpctl diff --git a/dpctl/tensor/libtensor/include/kernels/elementwise_functions/log2.hpp b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/log2.hpp new file mode 100644 index 0000000000..91bd063117 --- /dev/null +++ b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/log2.hpp @@ -0,0 +1,220 @@ +//=== log2.hpp - Unary function LOG2 ------ +//*-C++-*--/===// +// +// Data Parallel Control (dpctl) +// +// Copyright 2020-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of LOG2(x) function. 
+//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include + +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" +#include + +namespace dpctl +{ +namespace tensor +{ +namespace kernels +{ +namespace log2 +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; +using dpctl::tensor::type_utils::vec_cast; + +template struct Log2Functor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::negation< + std::disjunction, is_complex>>; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) + { + if constexpr (is_complex::value) { + using realT = typename argT::value_type; + return std::log(in) / std::log(realT{2}); + } + else { + return std::log2(in); + } + } + + template + sycl::vec operator()(const sycl::vec &in) + { + auto const &res_vec = sycl::log2(in); + using deducedT = typename std::remove_cv_t< + std::remove_reference_t>::element_type; + if constexpr (std::is_same_v) { + return res_vec; + } + else { + return vec_cast(res_vec); + } + } +}; + +template +using Log2ContigFunctor = elementwise_common:: + UnaryContigFunctor, vec_sz, n_vecs>; + +template +using Log2StridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template struct Log2OutputType +{ + using value_type = typename std::disjunction< // disjunction is C++17 + // feature, supported by DPC++ + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, std::complex>, + td_ns:: + TypeMapResultEntry, std::complex>, + td_ns::DefaultResultEntry>::result_type; +}; + +typedef sycl::event (*log2_contig_impl_fn_ptr_t)( + sycl::queue, + size_t, + const char *, + char *, + const std::vector &); + +template +class log2_contig_kernel; + +template +sycl::event log2_contig_impl(sycl::queue exec_q, + size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + return elementwise_common::unary_contig_impl< + argTy, Log2OutputType, Log2ContigFunctor, log2_contig_kernel>( + exec_q, nelems, arg_p, res_p, depends); +} + +template struct Log2ContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = log2_contig_impl; + return fn; + } + } +}; + +template struct Log2TypeMapFactory +{ + /*! 
@brief get typeid for output type of std::log2(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename Log2OutputType::value_type; + ; + return td_ns::GetTypeid{}.get(); + } +}; + +template class log2_strided_kernel; + +typedef sycl::event (*log2_strided_impl_fn_ptr_t)( + sycl::queue, + size_t, + int, + const py::ssize_t *, + const char *, + py::ssize_t, + char *, + py::ssize_t, + const std::vector &, + const std::vector &); + +template +sycl::event +log2_strided_impl(sycl::queue exec_q, + size_t nelems, + int nd, + const py::ssize_t *shape_and_strides, + const char *arg_p, + py::ssize_t arg_offset, + char *res_p, + py::ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, Log2OutputType, Log2StridedFunctor, log2_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template struct Log2StridedFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = log2_strided_impl; + return fn; + } + } +}; + +} // namespace log2 +} // namespace kernels +} // namespace tensor +} // namespace dpctl diff --git a/dpctl/tensor/libtensor/source/elementwise_functions.cpp b/dpctl/tensor/libtensor/source/elementwise_functions.cpp index 5898f0ca7d..d07cdb7962 100644 --- a/dpctl/tensor/libtensor/source/elementwise_functions.cpp +++ b/dpctl/tensor/libtensor/source/elementwise_functions.cpp @@ -49,7 +49,9 @@ #include "kernels/elementwise_functions/less.hpp" #include "kernels/elementwise_functions/less_equal.hpp" #include "kernels/elementwise_functions/log.hpp" +#include "kernels/elementwise_functions/log10.hpp" #include "kernels/elementwise_functions/log1p.hpp" +#include "kernels/elementwise_functions/log2.hpp" #include "kernels/elementwise_functions/logical_and.hpp" #include "kernels/elementwise_functions/logical_not.hpp" #include "kernels/elementwise_functions/logical_or.hpp" @@ -1010,13 +1012,72 @@ void populate_log1p_dispatch_vectors(void) // U22: ==== LOG2 (x) namespace impl { -// FIXME: add code for U22 + +namespace log2_fn_ns = dpctl::tensor::kernels::log2; + +static unary_contig_impl_fn_ptr_t log2_contig_dispatch_vector[td_ns::num_types]; +static int log2_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + log2_strided_dispatch_vector[td_ns::num_types]; + +void populate_log2_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = log2_fn_ns; + + using fn_ns::Log2ContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(log2_contig_dispatch_vector); + + using fn_ns::Log2StridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(log2_strided_dispatch_vector); + + using fn_ns::Log2TypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(log2_output_typeid_vector); +}; + } // namespace impl // U23: ==== LOG10 (x) namespace impl { -// FIXME: add code for U23 + +namespace log10_fn_ns = dpctl::tensor::kernels::log10; + +static unary_contig_impl_fn_ptr_t + log10_contig_dispatch_vector[td_ns::num_types]; +static int log10_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + log10_strided_dispatch_vector[td_ns::num_types]; + +void populate_log10_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = log10_fn_ns; + + using fn_ns::Log10ContigFactory; + DispatchVectorBuilder + dvb1; + 
dvb1.populate_dispatch_vector(log10_contig_dispatch_vector); + + using fn_ns::Log10StridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(log10_strided_dispatch_vector); + + using fn_ns::Log10TypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(log10_output_typeid_vector); +}; + } // namespace impl // B15: ==== LOGADDEXP (x1, x2) @@ -2271,10 +2332,51 @@ void init_elementwise_functions(py::module_ m) } // U22: ==== LOG2 (x) - // FIXME: + { + impl::populate_log2_dispatch_vectors(); + + using impl::log2_contig_dispatch_vector; + using impl::log2_output_typeid_vector; + using impl::log2_strided_dispatch_vector; + auto log2_pyapi = [&](dpctl::tensor::usm_ndarray src, + dpctl::tensor::usm_ndarray dst, + sycl::queue exec_q, + const std::vector &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, log2_output_typeid_vector, + log2_contig_dispatch_vector, log2_strided_dispatch_vector); + }; + auto log2_result_type_pyapi = [&](py::dtype dtype) { + return py_unary_ufunc_result_type(dtype, log2_output_typeid_vector); + }; + m.def("_log2", log2_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_log2_result_type", log2_result_type_pyapi, ""); + } // U23: ==== LOG10 (x) - // FIXME: + { + impl::populate_log10_dispatch_vectors(); + + using impl::log10_contig_dispatch_vector; + using impl::log10_output_typeid_vector; + using impl::log10_strided_dispatch_vector; + auto log10_pyapi = [&](dpctl::tensor::usm_ndarray src, + dpctl::tensor::usm_ndarray dst, + sycl::queue exec_q, + const std::vector &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, log10_output_typeid_vector, + log10_contig_dispatch_vector, log10_strided_dispatch_vector); + }; + auto log10_result_type_pyapi = [&](py::dtype dtype) { + return py_unary_ufunc_result_type(dtype, + log10_output_typeid_vector); + }; + m.def("_log10", log10_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_log10_result_type", log10_result_type_pyapi, ""); + } // B15: ==== LOGADDEXP (x1, x2) // FIXME: diff --git a/dpctl/tests/elementwise/test_log10.py b/dpctl/tests/elementwise/test_log10.py new file mode 100644 index 0000000000..de01b0ba1e --- /dev/null +++ b/dpctl/tests/elementwise/test_log10.py @@ -0,0 +1,133 @@ +# Data Parallel Control (dpctl) +# +# Copyright 2020-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import itertools + +import numpy as np +import pytest +from numpy.testing import assert_equal + +import dpctl.tensor as dpt +from dpctl.tests.helper import get_queue_or_skip, skip_if_dtype_not_supported + +from .utils import _all_dtypes, _map_to_device_dtype, _usm_types + + +@pytest.mark.parametrize("dtype", _all_dtypes) +def test_log_out_type(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + X = dpt.asarray(1, dtype=dtype, sycl_queue=q) + expected_dtype = np.log10(np.array(1, dtype=dtype)).dtype + expected_dtype = _map_to_device_dtype(expected_dtype, q.sycl_device) + assert dpt.log10(X).dtype == expected_dtype + + +@pytest.mark.parametrize("dtype", ["f2", "f4", "f8", "c8", "c16"]) +def test_log_output_contig(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + n_seq = 1027 + + X = dpt.linspace(1, 13, num=n_seq, dtype=dtype, sycl_queue=q) + Xnp = dpt.asnumpy(X) + + Y = dpt.log10(X) + tol = 8 * dpt.finfo(Y.dtype).resolution + + np.testing.assert_allclose( + dpt.asnumpy(Y), np.log10(Xnp), atol=tol, rtol=tol + ) + + +@pytest.mark.parametrize("dtype", ["f2", "f4", "f8", "c8", "c16"]) +def test_log_output_strided(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + n_seq = 2 * 1027 + + X = dpt.linspace(1, 13, num=n_seq, dtype=dtype, sycl_queue=q)[::-2] + Xnp = dpt.asnumpy(X) + + Y = dpt.log10(X) + tol = 8 * dpt.finfo(Y.dtype).resolution + + np.testing.assert_allclose( + dpt.asnumpy(Y), np.log10(Xnp), atol=tol, rtol=tol + ) + + +@pytest.mark.parametrize("usm_type", _usm_types) +def test_log_usm_type(usm_type): + q = get_queue_or_skip() + + arg_dt = np.dtype("f4") + input_shape = (10, 10, 10, 10) + X = dpt.empty(input_shape, dtype=arg_dt, usm_type=usm_type, sycl_queue=q) + X[..., 0::2] = 4 * dpt.e + X[..., 1::2] = 10 * dpt.e + + Y = dpt.log10(X) + assert Y.usm_type == X.usm_type + assert Y.sycl_queue == X.sycl_queue + assert Y.flags.c_contiguous + + expected_Y = np.empty(input_shape, dtype=arg_dt) + expected_Y[..., 0::2] = np.log10(np.float32(4 * dpt.e)) + expected_Y[..., 1::2] = np.log10(np.float32(10 * dpt.e)) + tol = 8 * dpt.finfo(Y.dtype).resolution + + np.testing.assert_allclose(dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol) + + +@pytest.mark.parametrize("dtype", _all_dtypes) +def test_log_order(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + arg_dt = np.dtype(dtype) + input_shape = (10, 10, 10, 10) + X = dpt.empty(input_shape, dtype=arg_dt, sycl_queue=q) + X[..., 0::2] = 4 * dpt.e + X[..., 1::2] = 10 * dpt.e + + for ord in ["C", "F", "A", "K"]: + for perms in itertools.permutations(range(4)): + U = dpt.permute_dims(X[:, ::-1, ::-1, :], perms) + Y = dpt.log10(U, order=ord) + expected_Y = np.log10(dpt.asnumpy(U)) + tol = 8 * max( + dpt.finfo(Y.dtype).resolution, + np.finfo(expected_Y.dtype).resolution, + ) + np.testing.assert_allclose( + dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol + ) + + +def test_log_special_cases(): + q = get_queue_or_skip() + + X = dpt.asarray( + [dpt.nan, -1.0, 0.0, -0.0, dpt.inf, -dpt.inf], dtype="f4", sycl_queue=q + ) + Xnp = dpt.asnumpy(X) + + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(dpt.asnumpy(dpt.log10(X)), np.log10(Xnp)) diff --git a/dpctl/tests/elementwise/test_log2.py b/dpctl/tests/elementwise/test_log2.py new file mode 100644 index 0000000000..3c306812d9 --- /dev/null +++ b/dpctl/tests/elementwise/test_log2.py @@ -0,0 +1,129 @@ +# Data Parallel Control (dpctl) +# +# Copyright 2020-2023 Intel Corporation +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools + +import numpy as np +import pytest +from numpy.testing import assert_equal + +import dpctl.tensor as dpt +from dpctl.tests.helper import get_queue_or_skip, skip_if_dtype_not_supported + +from .utils import _all_dtypes, _map_to_device_dtype, _usm_types + + +@pytest.mark.parametrize("dtype", _all_dtypes) +def test_log_out_type(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + X = dpt.asarray(1, dtype=dtype, sycl_queue=q) + expected_dtype = np.log2(np.array(1, dtype=dtype)).dtype + expected_dtype = _map_to_device_dtype(expected_dtype, q.sycl_device) + assert dpt.log2(X).dtype == expected_dtype + + +@pytest.mark.parametrize("dtype", ["f2", "f4", "f8", "c8", "c16"]) +def test_log_output_contig(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + n_seq = 1027 + + X = dpt.linspace(1, 13, num=n_seq, dtype=dtype, sycl_queue=q) + Xnp = dpt.asnumpy(X) + + Y = dpt.log2(X) + tol = 8 * dpt.finfo(Y.dtype).resolution + + np.testing.assert_allclose(dpt.asnumpy(Y), np.log2(Xnp), atol=tol, rtol=tol) + + +@pytest.mark.parametrize("dtype", ["f2", "f4", "f8", "c8", "c16"]) +def test_log_output_strided(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + n_seq = 2 * 1027 + + X = dpt.linspace(1, 13, num=n_seq, dtype=dtype, sycl_queue=q)[::-2] + Xnp = dpt.asnumpy(X) + + Y = dpt.log2(X) + tol = 8 * dpt.finfo(Y.dtype).resolution + + np.testing.assert_allclose(dpt.asnumpy(Y), np.log2(Xnp), atol=tol, rtol=tol) + + +@pytest.mark.parametrize("usm_type", _usm_types) +def test_log_usm_type(usm_type): + q = get_queue_or_skip() + + arg_dt = np.dtype("f4") + input_shape = (10, 10, 10, 10) + X = dpt.empty(input_shape, dtype=arg_dt, usm_type=usm_type, sycl_queue=q) + X[..., 0::2] = 4 * dpt.e + X[..., 1::2] = 10 * dpt.e + + Y = dpt.log2(X) + assert Y.usm_type == X.usm_type + assert Y.sycl_queue == X.sycl_queue + assert Y.flags.c_contiguous + + expected_Y = np.empty(input_shape, dtype=arg_dt) + expected_Y[..., 0::2] = np.log2(np.float32(4 * dpt.e)) + expected_Y[..., 1::2] = np.log2(np.float32(10 * dpt.e)) + tol = 8 * dpt.finfo(Y.dtype).resolution + + np.testing.assert_allclose(dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol) + + +@pytest.mark.parametrize("dtype", _all_dtypes) +def test_log_order(dtype): + q = get_queue_or_skip() + skip_if_dtype_not_supported(dtype, q) + + arg_dt = np.dtype(dtype) + input_shape = (10, 10, 10, 10) + X = dpt.empty(input_shape, dtype=arg_dt, sycl_queue=q) + X[..., 0::2] = 4 * dpt.e + X[..., 1::2] = 10 * dpt.e + + for ord in ["C", "F", "A", "K"]: + for perms in itertools.permutations(range(4)): + U = dpt.permute_dims(X[:, ::-1, ::-1, :], perms) + Y = dpt.log2(U, order=ord) + expected_Y = np.log2(dpt.asnumpy(U)) + tol = 8 * max( + dpt.finfo(Y.dtype).resolution, + np.finfo(expected_Y.dtype).resolution, + ) + np.testing.assert_allclose( + dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol + ) + + +def test_log_special_cases(): + q = get_queue_or_skip() + + X = dpt.asarray( + 
        [dpt.nan, -1.0, 0.0, -0.0, dpt.inf, -dpt.inf], dtype="f4", sycl_queue=q
+    )
+    Xnp = dpt.asnumpy(X)
+
+    with np.errstate(invalid="ignore", divide="ignore"):
+        assert_equal(dpt.asnumpy(dpt.log2(X)), np.log2(Xnp))

From 9c6f67ddb0efc0dab79c36c1f586dc15375fa6ad Mon Sep 17 00:00:00 2001
From: Nikita Grigorian
Date: Tue, 4 Jul 2023 04:20:11 -0700
Subject: [PATCH 2/2] Fixed docstrings for log10 and log2

---
 dpctl/tensor/_elementwise_funcs.py | 44 ++++++++++++++++++------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/dpctl/tensor/_elementwise_funcs.py b/dpctl/tensor/_elementwise_funcs.py
index bdd34b58f5..e6c05d8620 100644
--- a/dpctl/tensor/_elementwise_funcs.py
+++ b/dpctl/tensor/_elementwise_funcs.py
@@ -565,19 +565,23 @@
 # U22: ==== LOG2 (x)
 _log2_docstring_ = """
 log2(x, out=None, order='K')
-Computes the base 2 logarithm element-wise.
+
+Computes the base-2 logarithm for each element `x_i` of input array `x`.
+
 Args:
     x (usm_ndarray):
         Input array, expected to have numeric data type.
-    out (usm_ndarray):
-        Output array to populate. Array must have the correct
-        shape and the expected data type.
-    order ("C","F","A","K", optional): memory layout of the new
-        output array, if parameter `out` is `None`.
+    out ({None, usm_ndarray}, optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter `out` is `None`.
         Default: "K".
-Return:
-    usm_ndarray:
-        An array containing the element-wise base 2 logarithm values.
+Returns:
+    usm_ndarray:
+        An array containing the base-2 logarithm of `x`.
+        The data type of the returned array is determined by the
+        Type Promotion Rules.
 """
 
 log2 = UnaryElementwiseFunc(
@@ -587,19 +591,23 @@
 # U23: ==== LOG10 (x)
 _log10_docstring_ = """
 log10(x, out=None, order='K')
-Computes the base 10 logarithm element-wise.
+
+Computes the base-10 logarithm for each element `x_i` of input array `x`.
+
 Args:
     x (usm_ndarray):
         Input array, expected to have numeric data type.
-    out (usm_ndarray):
-        Output array to populate. Array must have the correct
-        shape and the expected data type.
-    order ("C","F","A","K", optional): memory layout of the new
-        output array, if parameter `out` is `None`.
+    out ({None, usm_ndarray}, optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter `out` is `None`.
         Default: "K".
-Return:
-    usm_ndarray:
-        An array containing the element-wise base 10 logarithm values.
+Returns:
+    usm_ndarray:
+        An array containing the base-10 logarithm of `x`.
+        The data type of the returned array is determined by the
+        Type Promotion Rules.
 """
 
 log10 = UnaryElementwiseFunc(
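
For reference, a minimal usage sketch of the new functions once both patches are applied. This is an illustration only, not part of the patch: the array values and tolerance are arbitrary choices, and it assumes dpctl is built from this branch with a working SYCL device and default queue.

import numpy as np

import dpctl.tensor as dpt

# Build a small single-precision array on the default SYCL device.
x = dpt.linspace(1, 1024, num=11, dtype="f4")

# Element-wise base-2 and base-10 logarithms added by this patch.
y2 = dpt.log2(x)
y10 = dpt.log10(x)

# Compare against NumPy on the host within floating-point tolerance.
np.testing.assert_allclose(dpt.asnumpy(y2), np.log2(dpt.asnumpy(x)), rtol=1e-5)
np.testing.assert_allclose(dpt.asnumpy(y10), np.log10(dpt.asnumpy(x)), rtol=1e-5)

# The `out=` keyword described in the docstrings writes the result into a
# preallocated array of the expected shape and data type.
res = dpt.empty_like(x)
dpt.log10(x, out=res)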