@@ -64,7 +64,7 @@ at::Tensor AtenIpexCPUDev::dil_convolution(
       dilation,
       groups);
 
-  return dbl::comm::gen_aten_tensor_by(dil_output);
+  return dbl::comm::gen_aten_tensor_by(std::move(dil_output));
 }
 
 at::Tensor dil_convolution_backward_input(
@@ -87,7 +87,7 @@ at::Tensor dil_convolution_backward_input(
       padding.vec(),
       padding.vec(),
       groups);
-  return dbl::comm::gen_aten_tensor_by(dil_grad_input);
+  return dbl::comm::gen_aten_tensor_by(std::move(dil_grad_input));
 }
 
 std::tuple<at::Tensor, at::Tensor> dil_convolution_backward_weights(
@@ -117,8 +117,8 @@ std::tuple<at::Tensor, at::Tensor> dil_convolution_backward_weights(
         groups,
         diff_weight_type);
     return std::make_tuple(
-        dbl::comm::gen_aten_tensor_by(dil_grad_weight),
-        dbl::comm::gen_aten_tensor_by(dil_grad_bias));
+        dbl::comm::gen_aten_tensor_by(std::move(dil_grad_weight)),
+        dbl::comm::gen_aten_tensor_by(std::move(dil_grad_bias)));
   } else {
     dil::convolution_backward_weights::compute(
         dil_input,
@@ -132,7 +132,7 @@ std::tuple<at::Tensor, at::Tensor> dil_convolution_backward_weights(
         groups,
         diff_weight_type);
     return std::make_tuple(
-        dbl::comm::gen_aten_tensor_by(dil_grad_weight),
+        dbl::comm::gen_aten_tensor_by(std::move(dil_grad_weight)),
         at::Tensor());
   }
 }
@@ -255,7 +255,7 @@ at::Tensor AtenIpexCPUDev::dil_add(const at::Tensor& self, const at::Tensor& oth
   const std::vector<float> scales{1.0, alpha.to<float>()};
   dil::sum::compute(scales, {x, y}, z);
 
-  return dbl::comm::gen_aten_tensor_by(z);
+  return dbl::comm::gen_aten_tensor_by(std::move(z));
 }
 
 at::Tensor& AtenIpexCPUDev::dil_add_(at::Tensor& self, const at::Tensor& other, at::Scalar alpha) {
@@ -552,9 +552,9 @@ at::Tensor AtenIpexCPUDev::dil_linear(
   output_size.push_back(weight.size(0));
 
   if (self.dim() > 2) {
-    return dbl::comm::gen_aten_tensor_by(y).reshape(output_size);
+    return dbl::comm::gen_aten_tensor_by(std::move(y)).reshape(output_size);
   }
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 at::Tensor dil_linear_backward_input(
@@ -574,9 +574,9 @@ at::Tensor dil_linear_backward_input(
      grady, w, {input_reshaped_size.begin(), input_reshaped_size.end()}, gradx);
 
   if (input_size.size() > 2) {
-    return dbl::comm::gen_aten_tensor_by(gradx).reshape(input_size);
+    return dbl::comm::gen_aten_tensor_by(std::move(gradx)).reshape(input_size);
   }
-  return dbl::comm::gen_aten_tensor_by(gradx);
+  return dbl::comm::gen_aten_tensor_by(std::move(gradx));
 }
 
 std::tuple<at::Tensor, at::Tensor> dil_linear_backward_weights(
@@ -593,12 +593,12 @@ std::tuple<at::Tensor, at::Tensor> dil_linear_backward_weights(
   if (bias_defined) {
     dil::inner_product_backward_weights::compute(x, grady, gradw, gradb, diff_weight_type);
     return std::tuple<at::Tensor, at::Tensor>{
-        dbl::comm::gen_aten_tensor_by(gradw),
-        dbl::comm::gen_aten_tensor_by(gradb)};
+        dbl::comm::gen_aten_tensor_by(std::move(gradw)),
+        dbl::comm::gen_aten_tensor_by(std::move(gradb))};
   } else {
     dil::inner_product_backward_weights::compute(x, grady, gradw, diff_weight_type);
     return std::tuple<at::Tensor, at::Tensor>{
-        dbl::comm::gen_aten_tensor_by(gradw),
+        dbl::comm::gen_aten_tensor_by(std::move(gradw)),
         at::Tensor()};
   }
 }
@@ -632,8 +632,8 @@ std::tuple<at::Tensor, at::Tensor> _dil_dropout(
   dil::tensor y;
   dil::dropout_forward::compute(x, ratio, y, mask);
   return std::tuple<at::Tensor, at::Tensor>{
-      dbl::comm::gen_aten_tensor_by(y),
-      dbl::comm::gen_aten_tensor_by(mask)};
+      dbl::comm::gen_aten_tensor_by(std::move(y)),
+      dbl::comm::gen_aten_tensor_by(std::move(mask))};
 }
 
 at::Tensor AtenIpexCPUDev::dil_dropout(const at::Tensor& self, double ratio, bool train) {
@@ -657,7 +657,7 @@ at::Tensor AtenIpexCPUDev::dil_dropout_backward(
 
   dil::tensor dX;
   dil::dropout_backward::compute(mask_dil, dY, dX);
-  return dbl::comm::gen_aten_tensor_by(dX);
+  return dbl::comm::gen_aten_tensor_by(std::move(dX));
 }
 
 std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_norm(
@@ -696,9 +696,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_
       dil::sum::compute(scales_var, {v, saved_var}, v);
     }
     return std::make_tuple(
-        dbl::comm::gen_aten_tensor_by(y),
-        dbl::comm::gen_aten_tensor_by(saved_mean),
-        dbl::comm::gen_aten_tensor_by(saved_var));
+        dbl::comm::gen_aten_tensor_by(std::move(y)),
+        dbl::comm::gen_aten_tensor_by(std::move(saved_mean)),
+        dbl::comm::gen_aten_tensor_by(std::move(saved_var)));
   } else {
     if (use_running_stat) {
       dil::tensor m = dbl::comm::try_gen_dil_tensor(running_mean);
@@ -710,7 +710,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_
           x, w, b, y, eps);
     }
     return std::make_tuple(
-        dbl::comm::gen_aten_tensor_by(y),
+        dbl::comm::gen_aten_tensor_by(std::move(y)),
         at::Tensor(),
         at::Tensor());
   }
@@ -742,9 +742,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_
       x, m, v, grady, w, gradx, gradw, gradb, eps);
 
   return std::make_tuple(
-      dbl::comm::gen_aten_tensor_by(gradx),
-      dbl::comm::gen_aten_tensor_by(gradw),
-      dbl::comm::gen_aten_tensor_by(gradb));
+      dbl::comm::gen_aten_tensor_by(std::move(gradx)),
+      dbl::comm::gen_aten_tensor_by(std::move(gradw)),
+      dbl::comm::gen_aten_tensor_by(std::move(gradb)));
 }
 
 at::Tensor AtenIpexCPUDev::dil_max_pooling(
@@ -969,7 +969,7 @@ at::Tensor AtenIpexCPUDev::dil_relu(const at::Tensor& input) {
   dil::tensor y;
   dil::eltwise_forward::compute(
       x, y, dil::algorithm::eltwise_relu, dil::prop_kind::forward_training, /*alpha*/ 0.0);
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 at::Tensor& AtenIpexCPUDev::dil_relu_(at::Tensor& input) {
@@ -998,7 +998,7 @@ at::Tensor AtenIpexCPUDev::dil_threshold_backward(const at::Tensor& grad_output,
   dil::tensor gradx;
   dil::eltwise_backward::compute(x, grady, gradx,
       dil::algorithm::eltwise_relu, /*alpha*/ 0.0);
-  return dbl::comm::gen_aten_tensor_by(gradx);
+  return dbl::comm::gen_aten_tensor_by(std::move(gradx));
 }
 
 at::Tensor AtenIpexCPUDev::dil__softmax(
@@ -1014,7 +1014,7 @@ at::Tensor AtenIpexCPUDev::dil__softmax(
   dil::tensor x = dbl::comm::try_gen_dil_tensor(self);
   dil::tensor y;
   dil::softmax_forward::compute(x, y, wrapped_dim);
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 at::Tensor AtenIpexCPUDev::dil__softmax_backward_data(
@@ -1032,7 +1032,7 @@ at::Tensor AtenIpexCPUDev::dil__softmax_backward_data(
   dil::tensor grady = dbl::comm::try_gen_dil_tensor(grad_output_contiguous);
   dil::tensor gradx;
   dil::softmax_backward::compute(y, grady, gradx, wrapped_dim);
-  return dbl::comm::gen_aten_tensor_by(gradx);
+  return dbl::comm::gen_aten_tensor_by(std::move(gradx));
 }
 
 at::Tensor AtenIpexCPUDev::dil_sigmoid(const at::Tensor& self) {
@@ -1042,7 +1042,7 @@ at::Tensor AtenIpexCPUDev::dil_sigmoid(const at::Tensor& self) {
   dil::tensor y;
   dil::eltwise_forward::compute(
       x, y, dil::algorithm::eltwise_logistic_use_dst_for_bwd, dil::prop_kind::forward);
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 at::Tensor& AtenIpexCPUDev::dil_sigmoid_(at::Tensor& self) {
@@ -1069,7 +1069,7 @@ at::Tensor AtenIpexCPUDev::dil_sigmoid_backward(
   dil::tensor gx;
   dil::eltwise_backward::compute(y, gy, gx,
       dil::algorithm::eltwise_logistic_use_dst_for_bwd);
-  return dbl::comm::gen_aten_tensor_by(gx);
+  return dbl::comm::gen_aten_tensor_by(std::move(gx));
 }
 
 at::Tensor AtenIpexCPUDev::dil_reshape(const at::Tensor& self, at::IntArrayRef size) {
@@ -1082,7 +1082,7 @@ at::Tensor AtenIpexCPUDev::dil_reshape(const at::Tensor& self, at::IntArrayRef s
   const dil::tensor x = dbl::comm::try_gen_dil_tensor(self);
   dil::tensor y{x};
   y.reshape(inferred_size);
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 at::Tensor AtenIpexCPUDev::dil_clone(const at::Tensor& self, c10::optional<c10::MemoryFormat> optional_memory_format) {
@@ -1095,7 +1095,7 @@ at::Tensor AtenIpexCPUDev::dil_clone(const at::Tensor& self, c10::optional<c10::
   dil::tensor src = dbl::comm::try_gen_dil_tensor(self);
   dil::tensor dst;
   dil::direct_copy::compute(src, dst);
-  return dbl::comm::gen_aten_tensor_by(dst);
+  return dbl::comm::gen_aten_tensor_by(std::move(dst));
 }
 
 at::Tensor AtenIpexCPUDev::dil_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
@@ -1110,7 +1110,7 @@ at::Tensor AtenIpexCPUDev::dil_transpose(const at::Tensor & self, int64_t dim0,
   dim1 = at::maybe_wrap_dim(dim1, self.dim());
   std::swap(axes[dim0], axes[dim1]);
   y.transpose_from(x, axes);
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 inline void check_cat_no_zero_dim(at::TensorList tensors) {
@@ -1154,7 +1154,7 @@ at::Tensor AtenIpexCPUDev::dil_cat(at::TensorList tensors, int64_t dim) {
   }
   dil::tensor y;
   dil::concat::compute(x, dim, y);
-  return dbl::comm::gen_aten_tensor_by(y);
+  return dbl::comm::gen_aten_tensor_by(std::move(y));
 }
 
 std::vector<at::Tensor> AtenIpexCPUDev::dil_split_with_sizes(const at::Tensor& self, at::IntArrayRef split_sizes, int64_t dim) {
@@ -1175,7 +1175,7 @@ std::vector<at::Tensor> AtenIpexCPUDev::dil_split_with_sizes(const at::Tensor& s
   dim = at::maybe_wrap_dim(dim, self.dim());
   auto y = dil::spliter::compute(x, sizes, dim, false);
   for (auto j = 0; j < num_splits; j++) {
-    splits[j] = dbl::comm::gen_aten_tensor_by(y[j]);
+    splits[j] = dbl::comm::gen_aten_tensor_by(std::move(y[j]));
   }
   return splits;
 }
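
Every hunk above makes the same mechanical change: a `dil::tensor` that is about to go out of scope is now handed to `dbl::comm::gen_aten_tensor_by` as an rvalue via `std::move`, so the wrapper can take ownership of the tensor instead of copying it. Below is a minimal, self-contained sketch of the rationale; the types and the `gen_aten_tensor_by` signature are hypothetical stand-ins, not the IPEX sources, and assume the function takes its `dil::tensor` argument by value:

```cpp
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical stand-in for dil::tensor: a handle to a shared buffer, so
// copying means an atomic refcount bump while moving just transfers the handle.
struct DilTensor {
  std::shared_ptr<std::vector<float>> buf;

  DilTensor() : buf(std::make_shared<std::vector<float>>(1024)) {}
  DilTensor(const DilTensor& o) : buf(o.buf) {
    std::cout << "copy (refcount bump)\n";
  }
  DilTensor(DilTensor&& o) noexcept : buf(std::move(o.buf)) {
    std::cout << "move (handle transferred)\n";
  }
};

// Hypothetical stand-in for the ATen tensor that wraps the dil buffer.
struct AtenTensor {
  DilTensor impl;
};

// Hypothetical analogue of dbl::comm::gen_aten_tensor_by: taking the argument
// by value means an lvalue caller pays a copy, an rvalue caller does not.
AtenTensor gen_aten_tensor_by(DilTensor t) {
  return AtenTensor{std::move(t)};
}

int main() {
  DilTensor y;  // local op output, like the dil::tensor locals in the diff
  // Last use of y, so moving is safe: prints "move", never "copy".
  AtenTensor out = gen_aten_tensor_by(std::move(y));
  (void)out;
}
```

Under that assumption the moves are safe everywhere in this commit, because each moved-from tensor is a local whose last use is the conversion; the one container case, `y[j]` in `dil_split_with_sizes`, is likewise never read again after the move.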