
Commit 6a614f0

rename some FNs
1 parent 5d030c0 commit 6a614f0

File tree

7 files changed (+24, -20 lines)


intel_pytorch_extension_py/ops/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -4,5 +4,5 @@
 from .pooling import *
 from .reshape import *
 from .mlp import *
-from .DNNL_linear_relu import *
+from .dil_linear_relu import *

intel_pytorch_extension_py/ops/DNNL_linear_relu.py renamed to intel_pytorch_extension_py/ops/dil_linear_relu.py

Lines changed: 5 additions & 5 deletions
@@ -6,7 +6,7 @@
 import math
 import _torch_ipex as core

-class DNNLFC(Function):
+class dilLinearFuseReluFC(Function):
     @staticmethod
     def forward(ctx, input, weight, bias):
         output = core.linear_fuse_relu(input, weight, bias)
@@ -21,17 +21,17 @@ def backward(ctx, grad_output):
             output_mask = (input.requires_grad, weight.requires_grad, 0)
         else:
             output_mask = (input.requires_grad, weight.requires_grad, bias.requires_grad)
-        grad_output = core.dnnl_relu_use_dst_backward(grad_output, output)
+        grad_output = core.relu_use_dst_backward(grad_output, output)
         grad_input, grad_weight, grad_bias = core.linear_backward(input, grad_output, weight, output_mask)
         return (grad_input, grad_weight, grad_bias)

-class DNNLLRFuse(nn.Module):
+class dilLinearFuseRelu(nn.Module):
     r"""DNNL Linear module for using relu fused DNNL kernel"""

     __constants__ = ['bias']

     def __init__(self, in_features, out_features, bias=True):
-        super(DNNLLRFuse, self).__init__()
+        super(dilLinearFuseRelu, self).__init__()
         self.in_features = in_features
         self.out_features = out_features
         self.weight = Parameter(torch.Tensor(out_features, in_features))
@@ -50,6 +50,6 @@ def reset_parameters(self):

     def forward(self, input):
         # print(self.weight.shape)
-        output = DNNLFC.apply(input, self.weight, self.bias)
+        output = dilLinearFuseReluFC.apply(input, self.weight, self.bias)
         return output
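A minimal usage sketch of the renamed module (a sketch only, assuming the extension and its device setup are in place; the module sizes and tensor shapes below are illustrative):

    import torch
    from intel_pytorch_extension_py.ops.dil_linear_relu import dilLinearFuseRelu

    # fused Linear+ReLU module, renamed from DNNLLRFuse in this commit
    fc = dilLinearFuseRelu(in_features=128, out_features=64)

    x = torch.randn(16, 128, requires_grad=True)
    y = fc(x)            # forward dispatches through dilLinearFuseReluFC to core.linear_fuse_relu
    y.sum().backward()   # backward uses core.relu_use_dst_backward, then core.linear_backward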

torch_ipex/csrc/cpu/DevOPs.cpp

Lines changed: 11 additions & 2 deletions
@@ -568,9 +568,9 @@ at::Tensor AtenIpexCPUDev::dil_linear_fuse_relu(
   if (bias.has_value()) {
     at::Tensor bias_vec = bias.value();
     const dil::tensor b = dbl::comm::try_gen_dil_tensor(bias_vec);
-    dil::inner_product_forward::compute(x, w, b, y, true);
+    dil::inner_product_forward::compute(x, w, b, y, /*fuse_relu=*/true);
   } else {
-    dil::inner_product_forward::compute(x, w, y, true);
+    dil::inner_product_forward::compute(x, w, y, /*fuse_relu=*/true);
   }

   auto input_size = self.sizes();
@@ -1012,6 +1012,15 @@ at::Tensor& AtenIpexCPUDev::dil_relu_(at::Tensor& input) {
   return input;
 }

+at::Tensor AtenIpexCPUDev::dil_relu_use_dst_for_bwd(const at::Tensor& grad_output, const at::Tensor& output) {
+  const dil::tensor& y = dbl::comm::try_gen_dil_tensor(output);
+  dil::tensor grady = dbl::comm::try_gen_dil_tensor(grad_output);
+  dil::tensor gradx;
+  dil::eltwise_backward::compute(y, grady, gradx,
+      dil::algorithm::eltwise_relu_use_dst_for_bwd, /*alpha*/ 0.0);
+  return dbl::comm::gen_aten_tensor_by(gradx);
+}
+
 at::Tensor AtenIpexCPUDev::dil_threshold_backward(const at::Tensor& grad_output, const at::Tensor& input, at::Scalar threshold) {
   DEBUG("AtenIpexCPUDev::dil_threshold_backward\n");
   CHECK_DNNL_OP_PRE_COND(grad_output);

torch_ipex/csrc/cpu/DevOPs.h

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@ class AtenIpexCPUDev {
   static at::Tensor dil_adaptive_avg_pool2d_backward(const at::Tensor& grad_output, const at::Tensor& input);
   static at::Tensor dil_relu(const at::Tensor& input);
   static at::Tensor& dil_relu_(at::Tensor& input);
+  static at::Tensor dil_relu_use_dst_for_bwd(const at::Tensor& grad_output, const at::Tensor& output);
   static at::Tensor dil_threshold_backward(const at::Tensor& grad_output, const at::Tensor& input, at::Scalar threshold);
   static at::Tensor dil__softmax(const at::Tensor& self, const int64_t dim, bool half_to_float);
   static at::Tensor dil__softmax_backward_data(const at::Tensor& grad_output, const at::Tensor& output, int64_t dim, const at::Tensor& self);

torch_ipex/csrc/cpu/ExtendOPs.cpp

Lines changed: 3 additions & 9 deletions
@@ -459,15 +459,9 @@ at::Tensor AtenIpexTypeExt::reshape(const at::Tensor& input, at::IntArrayRef siz
 }


-at::Tensor AtenIpexTypeExt::dnnl_relu_use_dst_for_bwd(const at::Tensor& grad_output, const at::Tensor& output) {
-  RECORD_FUNCTION("dnnl_relu_use_dst_for_bwd", std::vector<c10::IValue>({grad_output, output}), torch::autograd::Node::peek_at_next_sequence_nr());
-  const dil::tensor& y = torch_ipex::cpu::dbl::comm::try_gen_dil_tensor(output);
-  dil::tensor grady = torch_ipex::cpu::dbl::comm::try_gen_dil_tensor(grad_output);
-  dil::tensor gradx;
-  dil::eltwise_backward::compute(y, grady, gradx,
-      dil::algorithm::eltwise_relu_use_dst_for_bwd, /*alpha*/ 0.0);
-  auto ret = torch_ipex::cpu::dbl::comm::gen_aten_tensor_by(gradx);
-  return ret;
+at::Tensor AtenIpexTypeExt::relu_use_dst_for_bwd(const at::Tensor& grad_output, const at::Tensor& output) {
+  RECORD_FUNCTION("dil_relu_use_dst_for_bwd", std::vector<c10::IValue>({grad_output, output}), torch::autograd::Node::peek_at_next_sequence_nr());
+  return cpu::AtenIpexCPUDev::dil_relu_use_dst_for_bwd(grad_output, output);
 }

 } // namespace torch_ipex

torch_ipex/csrc/cpu/ExtendOPs.h

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ class AtenIpexTypeExt {
   static at::Tensor linear(const at::Tensor& input, const at::Tensor& weight, const c10::optional<at::Tensor>& bias);
   static at::Tensor linear_fuse_relu(const at::Tensor& input, const at::Tensor& weight, const c10::optional<at::Tensor>& bias);
   static std::tuple<at::Tensor, at::Tensor, at::Tensor> linear_backward(const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight, std::array<bool,3> output_mask);
-  static at::Tensor dnnl_relu_use_dst_for_bwd(const at::Tensor& grad_output, const at::Tensor& output);
+  static at::Tensor relu_use_dst_for_bwd(const at::Tensor& grad_output, const at::Tensor& output);
   static at::Tensor adaptive_avg_pool2d(at::Tensor const& input, at::IntArrayRef output_size);
   static at::Tensor adaptive_avg_pool2d_backward(const at::Tensor& grad_output, const at::Tensor& input);
   static at::Tensor max_pooling(const at::Tensor& input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);

torch_ipex/csrc/init_python_bindings.cpp

Lines changed: 2 additions & 2 deletions
@@ -100,9 +100,9 @@ void InitIpexModuleBindings(py::module m) {
         [](const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight, std::array<bool,3> output_mask) {
           return AtenIpexTypeExt::linear_backward(input, grad_output, weight, output_mask);
         });
-  m.def("dnnl_relu_use_dst_backward",
+  m.def("relu_use_dst_backward",
         [](const at::Tensor& grad_output, const at::Tensor& output) {
-          return AtenIpexTypeExt::dnnl_relu_use_dst_for_bwd(grad_output, output);
+          return AtenIpexTypeExt::relu_use_dst_for_bwd(grad_output, output);
         });
   m.def("adaptive_avg_pool2d",
         [](at::Tensor const& input, at::IntArrayRef output_size) {
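For illustration, the renamed binding can also be called directly from Python once _torch_ipex is built. A small, hypothetical smoke test (assuming plain dense CPU tensors are acceptable inputs here):

    import torch
    import _torch_ipex as core

    # ReLU backward computed from the saved *output* (dst) rather than the input
    output = torch.rand(4, 8)          # stands in for the saved ReLU output
    grad_output = torch.ones(4, 8)
    grad_input = core.relu_use_dst_backward(grad_output, output)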
