
Elementwise functions tuning #1889

Merged: 13 commits, Nov 22, 2024

1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -35,6 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Fix additional warnings when generating docs [gh-1861](https://github.com/IntelPython/dpctl/pull/1861)
* Add missing include of SYCL header to "math_utils.hpp" [gh-1899](https://github.com/IntelPython/dpctl/pull/1899)
* Add support of CV-qualifiers in `is_complex<T>` helper [gh-1900](https://github.com/IntelPython/dpctl/pull/1900)
* Tuning work for elementwise functions with modest performance gains (under 10%) [gh-1889](https://github.com/IntelPython/dpctl/pull/1889)

## [0.18.1] - Oct. 11, 2024

2 changes: 1 addition & 1 deletion dpctl/tensor/libtensor/include/kernels/alignment.hpp
@@ -30,7 +30,7 @@ namespace kernels
namespace alignment_utils
{

static constexpr size_t required_alignment = 64;
static constexpr size_t required_alignment = 64UL;

template <std::uintptr_t alignment, typename Ptr> bool is_aligned(Ptr p)
{
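
The 64UL constant above pairs with is_aligned to decide whether a contiguous kernel may enable its sub-group load/store fast path. The dispatch code is outside the hunks shown here, so the helper below is only a sketch of that gating pattern, with an assumed is_aligned body and a hypothetical can_use_sg_loadstore helper:

// Sketch only: how required_alignment / is_aligned typically gate the
// enable_sg_loadstore template parameter of a contiguous functor.
#include <cstddef>
#include <cstdint>

static constexpr std::size_t required_alignment = 64UL;

template <std::uintptr_t alignment, typename Ptr> bool is_aligned(Ptr p)
{
    // assumed body; the real one lives in kernels/alignment.hpp
    return reinterpret_cast<std::uintptr_t>(p) % alignment == 0;
}

// Hypothetical helper: the sub-group path is only safe when every pointer
// the kernel touches is suitably aligned.
template <typename T>
bool can_use_sg_loadstore(const T *x, const T *min, const T *max, T *dst)
{
    return is_aligned<required_alignment>(x) &&
           is_aligned<required_alignment>(min) &&
           is_aligned<required_alignment>(max) &&
           is_aligned<required_alignment>(dst);
}

When the check fails, the functor is instantiated with enable_sg_loadstore = false and takes the plain per-element loop visible in the clip.hpp diff below.
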
69 changes: 37 additions & 32 deletions dpctl/tensor/libtensor/include/kernels/clip.hpp
@@ -33,6 +33,7 @@
#include "kernels/alignment.hpp"
#include "utils/math_utils.hpp"
#include "utils/offset_utils.hpp"
#include "utils/sycl_utils.hpp"
#include "utils/type_utils.hpp"

namespace dpctl
@@ -51,6 +52,9 @@ using dpctl::tensor::kernels::alignment_utils::
using dpctl::tensor::kernels::alignment_utils::is_aligned;
using dpctl::tensor::kernels::alignment_utils::required_alignment;

using dpctl::tensor::sycl_utils::sub_group_load;
using dpctl::tensor::sycl_utils::sub_group_store;

template <typename T> T clip(const T &x, const T &min, const T &max)
{
using dpctl::tensor::type_utils::is_complex;
@@ -75,8 +79,8 @@ template <typename T> T clip(const T &x, const T &min, const T &max)
}

template <typename T,
int vec_sz = 4,
int n_vecs = 2,
std::uint8_t vec_sz = 4,
std::uint8_t n_vecs = 2,
bool enable_sg_loadstore = true>
class ClipContigFunctor
{
@@ -100,37 +104,36 @@ class ClipContigFunctor

void operator()(sycl::nd_item<1> ndit) const
{
constexpr std::uint8_t nelems_per_wi = n_vecs * vec_sz;

using dpctl::tensor::type_utils::is_complex;
if constexpr (is_complex<T>::value || !enable_sg_loadstore) {
std::uint8_t sgSize = ndit.get_sub_group().get_local_range()[0];
size_t base = ndit.get_global_linear_id();

base = (base / sgSize) * sgSize * n_vecs * vec_sz + (base % sgSize);
for (size_t offset = base;
offset < std::min(nelems, base + sgSize * (n_vecs * vec_sz));
offset += sgSize)
{
const std::uint16_t sgSize =
ndit.get_sub_group().get_local_range()[0];
const size_t gid = ndit.get_global_linear_id();
const std::uint16_t nelems_per_sg = sgSize * nelems_per_wi;

const size_t start =
(gid / sgSize) * (nelems_per_sg - sgSize) + gid;
const size_t end = std::min(nelems, start + nelems_per_sg);

for (size_t offset = start; offset < end; offset += sgSize) {
dst_p[offset] = clip(x_p[offset], min_p[offset], max_p[offset]);
}
}
else {
auto sg = ndit.get_sub_group();
std::uint8_t sgSize = sg.get_local_range()[0];
std::uint8_t max_sgSize = sg.get_max_local_range()[0];
size_t base = n_vecs * vec_sz *
(ndit.get_group(0) * ndit.get_local_range(0) +
sg.get_group_id()[0] * max_sgSize);

if (base + n_vecs * vec_sz * sgSize < nelems &&
sgSize == max_sgSize)
{
sycl::vec<T, vec_sz> x_vec;
sycl::vec<T, vec_sz> min_vec;
sycl::vec<T, vec_sz> max_vec;
const std::uint16_t sgSize = sg.get_max_local_range()[0];

const size_t base =
nelems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
sg.get_group_id()[0] * sgSize);

if (base + nelems_per_wi * sgSize < nelems) {
sycl::vec<T, vec_sz> dst_vec;
#pragma unroll
for (std::uint8_t it = 0; it < n_vecs * vec_sz; it += vec_sz) {
auto idx = base + it * sgSize;
const size_t idx = base + it * sgSize;
auto x_multi_ptr = sycl::address_space_cast<
sycl::access::address_space::global_space,
sycl::access::decorated::yes>(&x_p[idx]);
@@ -144,21 +147,23 @@ class ClipContigFunctor
sycl::access::address_space::global_space,
sycl::access::decorated::yes>(&dst_p[idx]);

x_vec = sg.load<vec_sz>(x_multi_ptr);
min_vec = sg.load<vec_sz>(min_multi_ptr);
max_vec = sg.load<vec_sz>(max_multi_ptr);
const sycl::vec<T, vec_sz> x_vec =
sub_group_load<vec_sz>(sg, x_multi_ptr);
const sycl::vec<T, vec_sz> min_vec =
sub_group_load<vec_sz>(sg, min_multi_ptr);
const sycl::vec<T, vec_sz> max_vec =
sub_group_load<vec_sz>(sg, max_multi_ptr);
#pragma unroll
for (std::uint8_t vec_id = 0; vec_id < vec_sz; ++vec_id) {
dst_vec[vec_id] = clip(x_vec[vec_id], min_vec[vec_id],
max_vec[vec_id]);
}
sg.store<vec_sz>(dst_multi_ptr, dst_vec);
sub_group_store<vec_sz>(sg, dst_vec, dst_multi_ptr);
}
}
else {
for (size_t k = base + sg.get_local_id()[0]; k < nelems;
k += sgSize)
{
const size_t lane_id = sg.get_local_id()[0];
for (size_t k = base + lane_id; k < nelems; k += sgSize) {
dst_p[k] = clip(x_p[k], min_p[k], max_p[k]);
}
}
@@ -195,8 +200,8 @@ sycl::event clip_contig_impl(sycl::queue &q,
cgh.depends_on(depends);

size_t lws = 64;
constexpr unsigned int vec_sz = 4;
constexpr unsigned int n_vecs = 2;
constexpr std::uint8_t vec_sz = 4;
constexpr std::uint8_t n_vecs = 2;
const size_t n_groups =
((nelems + lws * n_vecs * vec_sz - 1) / (lws * n_vecs * vec_sz));
const auto gws_range = sycl::range<1>(n_groups * lws);
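
The reworked indexing in ClipContigFunctor is algebraically equivalent to the old one: since gid = (gid / sgSize) * sgSize + (gid % sgSize) and nelems_per_sg = sgSize * nelems_per_wi, the new start value (gid / sgSize) * (nelems_per_sg - sgSize) + gid reduces to the old (gid / sgSize) * sgSize * nelems_per_wi + (gid % sgSize). A standalone host-side check of that identity (not part of the PR; parameter values taken from the defaults above):

// Standalone check: the rewritten start index matches the previous
// formulation for representative sub-group sizes.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main()
{
    constexpr std::uint8_t vec_sz = 4;
    constexpr std::uint8_t n_vecs = 2;
    constexpr std::uint8_t nelems_per_wi = n_vecs * vec_sz;

    for (unsigned sgSize : {8u, 16u, 32u}) {
        const std::uint16_t nelems_per_sg = sgSize * nelems_per_wi;
        for (std::size_t gid = 0; gid < 100000; ++gid) {
            // old bookkeeping
            const std::size_t old_start =
                (gid / sgSize) * sgSize * nelems_per_wi + (gid % sgSize);
            // new bookkeeping
            const std::size_t new_start =
                (gid / sgSize) * (nelems_per_sg - sgSize) + gid;
            assert(old_start == new_start);
        }
    }
    return 0;
}

The same bookkeeping reappears as elems_per_wi / elems_per_sg in ContigCopyFunctor in copy_and_cast.hpp below.
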
67 changes: 34 additions & 33 deletions dpctl/tensor/libtensor/include/kernels/copy_and_cast.hpp
@@ -31,6 +31,7 @@
#include "dpctl_tensor_types.hpp"
#include "kernels/alignment.hpp"
#include "utils/offset_utils.hpp"
#include "utils/sycl_utils.hpp"
#include "utils/type_utils.hpp"

namespace dpctl
@@ -49,13 +50,16 @@ using dpctl::tensor::kernels::alignment_utils::
using dpctl::tensor::kernels::alignment_utils::is_aligned;
using dpctl::tensor::kernels::alignment_utils::required_alignment;

using dpctl::tensor::sycl_utils::sub_group_load;
using dpctl::tensor::sycl_utils::sub_group_store;

template <typename srcT, typename dstT, typename IndexerT>
class copy_cast_generic_kernel;

template <typename srcT,
typename dstT,
unsigned int vec_sz,
unsigned int n_vecs>
std::uint8_t vec_sz,
std::uint8_t n_vecs>
class copy_cast_contig_kernel;

template <typename srcT, typename dstT, typename IndexerT>
@@ -207,8 +211,8 @@ template <typename fnT, typename D, typename S> struct CopyAndCastGenericFactory
template <typename srcT,
typename dstT,
typename CastFnT,
int vec_sz = 4,
int n_vecs = 2,
std::uint8_t vec_sz = 4u,
std::uint8_t n_vecs = 2u,
bool enable_sg_loadstore = true>
class ContigCopyFunctor
{
@@ -227,58 +231,55 @@ class ContigCopyFunctor
{
CastFnT fn{};

constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz;

using dpctl::tensor::type_utils::is_complex;
if constexpr (!enable_sg_loadstore || is_complex<srcT>::value ||
is_complex<dstT>::value)
{
std::uint8_t sgSize = ndit.get_sub_group().get_local_range()[0];
size_t base = ndit.get_global_linear_id();

base = (base / sgSize) * sgSize * n_vecs * vec_sz + (base % sgSize);
for (size_t offset = base;
offset < std::min(nelems, base + sgSize * (n_vecs * vec_sz));
offset += sgSize)
{
std::uint16_t sgSize = ndit.get_sub_group().get_local_range()[0];
const size_t gid = ndit.get_global_linear_id();

// start = (gid / sgSize) * elems_per_sg + (gid % sgSize)
const std::uint16_t elems_per_sg = sgSize * elems_per_wi;
const size_t start = (gid / sgSize) * (elems_per_sg - sgSize) + gid;
const size_t end = std::min(nelems, start + elems_per_sg);
for (size_t offset = start; offset < end; offset += sgSize) {
dst_p[offset] = fn(src_p[offset]);
}
}
else {
auto sg = ndit.get_sub_group();
std::uint8_t sgSize = sg.get_local_range()[0];
std::uint8_t max_sgSize = sg.get_max_local_range()[0];
size_t base = n_vecs * vec_sz *
(ndit.get_group(0) * ndit.get_local_range(0) +
sg.get_group_id()[0] * max_sgSize);

if (base + n_vecs * vec_sz * sgSize < nelems &&
sgSize == max_sgSize)
{
sycl::vec<srcT, vec_sz> src_vec;
const std::uint16_t sgSize = sg.get_max_local_range()[0];
const size_t base =
elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
sg.get_group_id()[0] * sgSize);

if (base + elems_per_wi * sgSize < nelems) {
sycl::vec<dstT, vec_sz> dst_vec;

#pragma unroll
for (std::uint8_t it = 0; it < n_vecs * vec_sz; it += vec_sz) {
const size_t offset = base + it * sgSize;
auto src_multi_ptr = sycl::address_space_cast<
sycl::access::address_space::global_space,
sycl::access::decorated::yes>(
&src_p[base + it * sgSize]);
sycl::access::decorated::yes>(&src_p[offset]);
auto dst_multi_ptr = sycl::address_space_cast<
sycl::access::address_space::global_space,
sycl::access::decorated::yes>(
&dst_p[base + it * sgSize]);
sycl::access::decorated::yes>(&dst_p[offset]);

src_vec = sg.load<vec_sz>(src_multi_ptr);
const sycl::vec<srcT, vec_sz> src_vec =
sub_group_load<vec_sz>(sg, src_multi_ptr);
#pragma unroll
for (std::uint8_t k = 0; k < vec_sz; k++) {
dst_vec[k] = fn(src_vec[k]);
}
sg.store<vec_sz>(dst_multi_ptr, dst_vec);
sub_group_store<vec_sz>(sg, dst_vec, dst_multi_ptr);
}
}
else {
for (size_t k = base + sg.get_local_id()[0]; k < nelems;
k += sgSize)
{
const size_t start = base + sg.get_local_id()[0];
for (size_t k = start; k < nelems; k += sgSize) {
dst_p[k] = fn(src_p[k]);
}
}
@@ -332,8 +333,8 @@ sycl::event copy_and_cast_contig_impl(sycl::queue &q,
dstTy *dst_tp = reinterpret_cast<dstTy *>(dst_cp);

size_t lws = 64;
constexpr unsigned int vec_sz = 4;
constexpr unsigned int n_vecs = 2;
constexpr std::uint32_t vec_sz = 4;
constexpr std::uint32_t n_vecs = 2;
const size_t n_groups =
((nelems + lws * n_vecs * vec_sz - 1) / (lws * n_vecs * vec_sz));
const auto gws_range = sycl::range<1>(n_groups * lws);
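
Both clip_contig_impl and copy_and_cast_contig_impl launch with the same geometry: work-groups of lws = 64 work-items in which every work-item handles n_vecs * vec_sz elements, so the group count is a ceiling division of nelems by lws * n_vecs * vec_sz. A small standalone example of that computation (the element count is made up):

// Standalone sketch of the launch geometry used by the contiguous impls.
#include <cstddef>
#include <cstdint>
#include <iostream>

int main()
{
    const std::size_t nelems = 1000003; // assumed example element count
    const std::size_t lws = 64;
    constexpr std::uint32_t vec_sz = 4;
    constexpr std::uint32_t n_vecs = 2;

    const std::size_t elems_per_group = lws * n_vecs * vec_sz; // 512
    const std::size_t n_groups =
        (nelems + elems_per_group - 1) / elems_per_group; // ceiling division
    const std::size_t gws = n_groups * lws; // global work size

    std::cout << "n_groups = " << n_groups << ", gws = " << gws << "\n";
    // prints n_groups = 1954, gws = 125056
    return 0;
}

Any tail that does not fill a whole block of elems_per_wi * sgSize elements is handled inside the functors by the per-lane strided fallback loop, so the launch geometry itself needs no special tail handling.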