@@ -232,7 +232,7 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> AtenIpexCPUDev::dil_convolution_bac
     at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, std::array<bool,3> output_mask)
 {
   DEBUG("AtenIpexCPUDev::dil_convolution_backward\n");
-  at::Tensor grad_output = grad_output_t.is_contiguous() ? grad_output_t : grad_output_t.contiguous();
+  at::Tensor grad_output = IS_CONTIGUOUS_ANY(grad_output_t) ? grad_output_t : grad_output_t.contiguous();
   CHECK_DNNL_OP_PRE_COND(input);
   CHECK_DNNL_OP_PRE_COND(weight);
   dbl::comm::reorder_to_bf16_for_mix_prec(input);
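Every hunk below makes the same substitution: plain Tensor::is_contiguous() only recognizes at::MemoryFormat::Contiguous, so a dense channels-last tensor fails the check and pays for a needless .contiguous() copy. The IS_CONTIGUOUS_ANY macro is defined elsewhere in the tree; a minimal sketch of what it plausibly expands to, assuming it simply ORs together ATen's dense memory-format checks:

// Sketch only; the real IS_CONTIGUOUS_ANY is defined elsewhere in the repo.
// Tensor::is_contiguous() defaults to MemoryFormat::Contiguous, so a dense
// NHWC/NDHWC (channels-last) tensor reports false and would be copied for
// nothing. Accepting any dense layout lets it pass through unchanged.
#define IS_CONTIGUOUS_ANY(tensor)                            \
  ((tensor).is_contiguous(at::MemoryFormat::Contiguous) ||   \
   (tensor).is_contiguous(at::MemoryFormat::ChannelsLast) || \
   (tensor).is_contiguous(at::MemoryFormat::ChannelsLast3d))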
@@ -429,14 +429,29 @@ at::Tensor AtenIpexCPUDev::dil_convolution_overrideable(const at::Tensor & input
   }
   if (dbl::chk::dnnl_support_the_tensors(dnnl_input_tensors)) {
     if (transposed) {
-      return AtenIpexCPUDev::dil_deconvolution(input.is_contiguous() ? input : input.contiguous(), weight.is_contiguous() ? weight : weight.contiguous(), (bias.has_value() && bias.value().defined()) ? (bias.value().is_contiguous() ? bias.value() : bias.value().contiguous()) : at::Tensor(), padding, output_padding, stride, dilation, groups);
+      return AtenIpexCPUDev::dil_deconvolution(
+        IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
+        IS_CONTIGUOUS_ANY(weight) ? weight : weight.contiguous(),
+        (bias.has_value() && bias.value().defined()) ? (IS_CONTIGUOUS_ANY(bias.value()) ? bias.value() : bias.value().contiguous()) : at::Tensor(),
+        padding,
+        output_padding,
+        stride,
+        dilation,
+        groups);
     } else {
       // For the int8 path, the input is always in acbd format, which is non-contiguous; .contiguous() would reorder it to fp32.
       auto src_dil_type = dbl::comm::try_gen_dil_tensor(input).get_data_type();
-      auto input_temp = (src_dil_type == dil::data_type::u8 || src_dil_type == dil::data_type::s8 || input.is_contiguous()) ? input : input.contiguous();
+      auto input_temp = (src_dil_type == dil::data_type::u8 || src_dil_type == dil::data_type::s8 || IS_CONTIGUOUS_ANY(input)) ? input : input.contiguous();
       auto weight_dil_type = dbl::comm::try_gen_dil_tensor(weight).get_data_type();
-      auto weight_temp = (weight_dil_type == dil::data_type::s8 || weight.is_contiguous()) ? weight : weight.contiguous();
-      return AtenIpexCPUDev::dil_convolution(input_temp, weight_temp, (bias.has_value() && bias.value().defined()) ? bias.value() : at::Tensor(), stride, padding, dilation, groups);
+      auto weight_temp = (weight_dil_type == dil::data_type::s8 || IS_CONTIGUOUS_ANY(weight)) ? weight : weight.contiguous();
+      return AtenIpexCPUDev::dil_convolution(
+        input_temp,
+        weight_temp,
+        (bias.has_value() && bias.value().defined()) ? (IS_CONTIGUOUS_ANY(bias.value()) ? bias.value() : bias.value().contiguous()) : at::Tensor(),
+        stride,
+        padding,
+        dilation,
+        groups);
     }
   }
 }
@@ -472,9 +487,9 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> AtenIpexCPUDev::dil_convolution_bac
   if (dbl::chk::dnnl_support_the_tensors(dnnl_input_tensors)) {
     if (transposed) {
       return AtenIpexCPUDev::dil_deconvolution_backward(
-        input.is_contiguous() ? input : input.contiguous(),
-        grad_output.is_contiguous() ? grad_output : grad_output.contiguous(),
-        weight.is_contiguous() ? weight : weight.contiguous(),
+        IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
+        IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous(),
+        IS_CONTIGUOUS_ANY(weight) ? weight : weight.contiguous(),
         padding,
         output_padding,
         stride,
@@ -483,9 +498,9 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> AtenIpexCPUDev::dil_convolution_bac
         output_mask);
     } else {
       return AtenIpexCPUDev::dil_convolution_backward(
-        input.is_contiguous() ? input : input.contiguous(),
-        grad_output.is_contiguous() ? grad_output : grad_output.contiguous(),
-        weight.is_contiguous() ? weight : weight.contiguous(),
+        IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
+        IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous(),
+        IS_CONTIGUOUS_ANY(weight) ? weight : weight.contiguous(),
         padding,
         stride,
         dilation,
@@ -577,9 +592,9 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> AtenIpexCPUDev::cpu_deconvolution_b
 
   std::vector<at::Tensor> g_input(groups), g_weight(groups), g_bias(groups);
 
-  _ipex_self = _ipex_self.is_contiguous() ? _ipex_self : _ipex_self.contiguous();
-  _ipex_grad_output = _ipex_grad_output.is_contiguous() ? _ipex_grad_output : _ipex_grad_output.contiguous();
-  _ipex_weight = _ipex_weight.is_contiguous() ? _ipex_weight : _ipex_weight.contiguous();
+  _ipex_self = IS_CONTIGUOUS_ANY(_ipex_self) ? _ipex_self : _ipex_self.contiguous();
+  _ipex_grad_output = IS_CONTIGUOUS_ANY(_ipex_grad_output) ? _ipex_grad_output : _ipex_grad_output.contiguous();
+  _ipex_weight = IS_CONTIGUOUS_ANY(_ipex_weight) ? _ipex_weight : _ipex_weight.contiguous();
   for (int g = 0; g < groups; ++g) {
     auto _ipex_self_g = dbl::comm::subtensor(_ipex_self, 1, groups, g);
     auto _ipex_grad_output_g = dbl::comm::subtensor(_ipex_grad_output, 1, groups, g);
@@ -1315,7 +1330,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_
   CHECK_DNNL_OP_PRE_COND(weight);
 
   IPEX_CHECK(train, "mkldnn_batch_norm_backward: currently mkldnn only supports train mode");
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
 
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
@@ -1395,7 +1410,7 @@ at::Tensor AtenIpexCPUDev::dil_frozen_batch_norm_backward(const at::Tensor& grad
   CHECK_DNNL_OP_PRE_COND(input);
   CHECK_DNNL_OP_PRE_COND(weight);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
 
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
@@ -1483,7 +1498,7 @@ at::Tensor AtenIpexCPUDev::dil_avg_pool2d(
   }
 
   return dbl::pool::_dil_pooling(
-    input.is_contiguous() ? input : input.contiguous(),
+    IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
     kernel_size,
     stride,
     padding,
@@ -1509,7 +1524,7 @@ at::Tensor AtenIpexCPUDev::dil_avg_pool3d(
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
 
   return dbl::pool::_dil_pooling(
-    input.is_contiguous() ? input : input.contiguous(),
+    IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
     kernel_size,
     stride,
     padding,
@@ -1592,9 +1607,9 @@ at::Tensor AtenIpexCPUDev::dil_max_pooling_backward(
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
 
   return dbl::pool::_dil_pooling_backward(
-    grad_output.is_contiguous() ? grad_output : grad_output.contiguous(),
-    output.is_contiguous() ? output : output.contiguous(),
-    input.is_contiguous() ? input : input.contiguous(),
+    IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous(),
+    IS_CONTIGUOUS_ANY(output) ? output : output.contiguous(),
+    IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
     kernel_size,
     stride,
     padding,
@@ -1616,14 +1631,14 @@ at::Tensor AtenIpexCPUDev::dil_avg_pool2d_backward(
   CHECK_DNNL_OP_PRE_COND(grad_output);
   CHECK_DNNL_OP_PRE_COND(input);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
 
   return dbl::pool::_dil_pooling_backward(
     grad_output_contiguous,
     grad_output_contiguous,
-    input.is_contiguous() ? input : input.contiguous(),
+    IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
     kernel_size,
     stride,
     padding,
@@ -1646,15 +1661,15 @@ at::Tensor AtenIpexCPUDev::dil_avg_pool3d_backward(
   CHECK_DNNL_OP_PRE_COND(grad_output);
   CHECK_DNNL_OP_PRE_COND(input);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
 
   std::vector<int64_t> dilation{1, 1};
   return dbl::pool::_dil_pooling_backward(
     grad_output_contiguous,
     grad_output_contiguous,
-    input.is_contiguous() ? input : input.contiguous(),
+    IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
     kernel_size,
     stride,
     padding,
@@ -1696,7 +1711,7 @@ at::Tensor AtenIpexCPUDev::dil_adaptive_avg_pool2d_backward(
   return dbl::pool::_dil_pooling_backward(
     grad_output,
     grad_output,
-    input.is_contiguous() ? input : input.contiguous(),
+    IS_CONTIGUOUS_ANY(input) ? input : input.contiguous(),
     kernel_size,
     /*stride*/ kernel_size,
     /*padding*/ padding,
@@ -1777,7 +1792,7 @@ at::Tensor AtenIpexCPUDev::dil_threshold_backward(const at::Tensor& grad_output,
   CHECK_DNNL_OP_PRE_COND(grad_output);
   CHECK_DNNL_OP_PRE_COND(input);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
 
@@ -1819,7 +1834,7 @@ at::Tensor AtenIpexCPUDev::dil__softmax_backward_data(
   CHECK_DNNL_OP_PRE_COND(output);
   CHECK_DNNL_OP_PRE_COND(self);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(output, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(self, true);
@@ -1861,7 +1876,7 @@ at::Tensor AtenIpexCPUDev::dil__log_softmax_backward_data(
   CHECK_DNNL_OP_PRE_COND(output);
   CHECK_DNNL_OP_PRE_COND(self);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(output, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(self, true);
@@ -1909,7 +1924,7 @@ at::Tensor AtenIpexCPUDev::dil_sigmoid_backward(
   CHECK_DNNL_OP_PRE_COND(grad_output);
   CHECK_DNNL_OP_PRE_COND(output);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(output, true);
 
@@ -1956,7 +1971,7 @@ at::Tensor AtenIpexCPUDev::dil_tanh_backward(
   CHECK_DNNL_OP_PRE_COND(grad_output);
   CHECK_DNNL_OP_PRE_COND(output);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
   dbl::comm::reorder_to_bf16_for_mix_prec(output, true);
 
@@ -2084,7 +2099,7 @@ at::Tensor AtenIpexCPUDev::dil_cat(at::TensorList tensors, int64_t dim) {
   for (auto i = 0; i < tensors.size(); i++) {
     IPEX_CHECK(!(tensors[i].dim() == 1 && tensors[i].sizes()[0] == 0),
       "Currently Mkldnn cat operators do not support empty tensor.");
-    tensors_contiguous[i] = tensors[i].is_contiguous() ? tensors[i] : tensors[i].contiguous();
+    tensors_contiguous[i] = IS_CONTIGUOUS_ANY(tensors[i]) ? tensors[i] : tensors[i].contiguous();
 
     dbl::comm::reorder_to_bf16_for_mix_prec(tensors_contiguous[i], true);
 
@@ -2448,7 +2463,7 @@ at::Tensor AtenIpexCPUDev::dil_gelu_backward(const at::Tensor& grad_output, cons
 
   dbl::comm::reorder_to_bf16_for_mix_prec(input, true);
 
-  auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
+  auto grad_output_contiguous = IS_CONTIGUOUS_ANY(grad_output) ? grad_output : grad_output.contiguous();
   dbl::comm::reorder_to_bf16_for_mix_prec(grad_output_contiguous, true);
 
   dil::tensor x = dbl::comm::try_gen_dil_tensor(input);
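A note on the pattern: the ternary IS_CONTIGUOUS_ANY(t) ? t : t.contiguous() now recurs throughout the file. A follow-up could factor it into a small helper; a hypothetical sketch (to_contiguous_any is not a name in the codebase):

// Hypothetical helper, not part of this change: returns the tensor as-is
// when it is already dense in any memory format, otherwise makes a
// contiguous copy.
static inline at::Tensor to_contiguous_any(const at::Tensor& t) {
  return IS_CONTIGUOUS_ANY(t) ? t : t.contiguous();
}

Call sites such as dil_avg_pool2d would then read _dil_pooling(to_contiguous_any(input), ...).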