@@ -1603,7 +1603,7 @@ v128_t test_i8x16_popcnt(v128_t a) {
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[SHL_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP3]]
 //
-v128_t test_i8x16_shl(v128_t a, int32_t b) {
+v128_t test_i8x16_shl(v128_t a, uint32_t b) {
   return wasm_i8x16_shl(a, b);
 }
 
@@ -1617,7 +1617,7 @@ v128_t test_i8x16_shl(v128_t a, int32_t b) {
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[SHR_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP3]]
 //
-v128_t test_i8x16_shr(v128_t a, int32_t b) {
+v128_t test_i8x16_shr(v128_t a, uint32_t b) {
   return wasm_i8x16_shr(a, b);
 }
 
@@ -1631,7 +1631,7 @@ v128_t test_i8x16_shr(v128_t a, int32_t b) {
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[SHR_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP3]]
 //
-v128_t test_u8x16_shr(v128_t a, int32_t b) {
+v128_t test_u8x16_shr(v128_t a, uint32_t b) {
   return wasm_u8x16_shr(a, b);
 }
 
@@ -1824,7 +1824,7 @@ uint32_t test_i16x8_bitmask(v128_t a) {
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[SHL_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP3]]
 //
-v128_t test_i16x8_shl(v128_t a, int32_t b) {
+v128_t test_i16x8_shl(v128_t a, uint32_t b) {
   return wasm_i16x8_shl(a, b);
 }
 
@@ -1838,7 +1838,7 @@ v128_t test_i16x8_shl(v128_t a, int32_t b) {
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[SHR_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP3]]
 //
-v128_t test_i16x8_shr(v128_t a, int32_t b) {
+v128_t test_i16x8_shr(v128_t a, uint32_t b) {
   return wasm_i16x8_shr(a, b);
 }
 
@@ -1852,7 +1852,7 @@ v128_t test_i16x8_shr(v128_t a, int32_t b) {
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[SHR_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP3]]
 //
-v128_t test_u16x8_shr(v128_t a, int32_t b) {
+v128_t test_u16x8_shr(v128_t a, uint32_t b) {
   return wasm_u16x8_shr(a, b);
 }
 
@@ -2048,7 +2048,7 @@ uint32_t test_i32x4_bitmask(v128_t a) {
 // CHECK-NEXT: [[SHL_I:%.*]] = shl <4 x i32> [[A:%.*]], [[SPLAT_SPLAT_I]]
 // CHECK-NEXT: ret <4 x i32> [[SHL_I]]
 //
-v128_t test_i32x4_shl(v128_t a, int32_t b) {
+v128_t test_i32x4_shl(v128_t a, uint32_t b) {
   return wasm_i32x4_shl(a, b);
 }
 
@@ -2059,7 +2059,7 @@ v128_t test_i32x4_shl(v128_t a, int32_t b) {
 // CHECK-NEXT: [[SHR_I:%.*]] = ashr <4 x i32> [[A:%.*]], [[SPLAT_SPLAT_I]]
 // CHECK-NEXT: ret <4 x i32> [[SHR_I]]
 //
-v128_t test_i32x4_shr(v128_t a, int32_t b) {
+v128_t test_i32x4_shr(v128_t a, uint32_t b) {
   return wasm_i32x4_shr(a, b);
 }
 
@@ -2070,7 +2070,7 @@ v128_t test_i32x4_shr(v128_t a, int32_t b) {
 // CHECK-NEXT: [[SHR_I:%.*]] = lshr <4 x i32> [[A:%.*]], [[SPLAT_SPLAT_I]]
 // CHECK-NEXT: ret <4 x i32> [[SHR_I]]
 //
-v128_t test_u32x4_shr(v128_t a, int32_t b) {
+v128_t test_u32x4_shr(v128_t a, uint32_t b) {
   return wasm_u32x4_shr(a, b);
 }
 
@@ -2198,42 +2198,42 @@ uint32_t test_i64x2_bitmask(v128_t a) {
 // CHECK-LABEL: @test_i64x2_shl(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <2 x i64>
-// CHECK-NEXT: [[CONV_I:%.*]] = sext i32 [[B:%.*]] to i64
+// CHECK-NEXT: [[CONV_I:%.*]] = zext i32 [[B:%.*]] to i64
 // CHECK-NEXT: [[SPLAT_SPLATINSERT_I:%.*]] = insertelement <2 x i64> poison, i64 [[CONV_I]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT_I:%.*]] = shufflevector <2 x i64> [[SPLAT_SPLATINSERT_I]], <2 x i64> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT: [[SHL_I:%.*]] = shl <2 x i64> [[TMP0]], [[SPLAT_SPLAT_I]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[SHL_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
 //
-v128_t test_i64x2_shl(v128_t a, int32_t b) {
+v128_t test_i64x2_shl(v128_t a, uint32_t b) {
   return wasm_i64x2_shl(a, b);
 }
 
 // CHECK-LABEL: @test_i64x2_shr(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <2 x i64>
-// CHECK-NEXT: [[CONV_I:%.*]] = sext i32 [[B:%.*]] to i64
+// CHECK-NEXT: [[CONV_I:%.*]] = zext i32 [[B:%.*]] to i64
 // CHECK-NEXT: [[SPLAT_SPLATINSERT_I:%.*]] = insertelement <2 x i64> poison, i64 [[CONV_I]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT_I:%.*]] = shufflevector <2 x i64> [[SPLAT_SPLATINSERT_I]], <2 x i64> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT: [[SHR_I:%.*]] = ashr <2 x i64> [[TMP0]], [[SPLAT_SPLAT_I]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[SHR_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
 //
-v128_t test_i64x2_shr(v128_t a, int32_t b) {
+v128_t test_i64x2_shr(v128_t a, uint32_t b) {
   return wasm_i64x2_shr(a, b);
 }
 
 // CHECK-LABEL: @test_u64x2_shr(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <2 x i64>
-// CHECK-NEXT: [[CONV_I:%.*]] = sext i32 [[B:%.*]] to i64
+// CHECK-NEXT: [[CONV_I:%.*]] = zext i32 [[B:%.*]] to i64
 // CHECK-NEXT: [[SPLAT_SPLATINSERT_I:%.*]] = insertelement <2 x i64> poison, i64 [[CONV_I]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT_I:%.*]] = shufflevector <2 x i64> [[SPLAT_SPLATINSERT_I]], <2 x i64> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT: [[SHR_I:%.*]] = lshr <2 x i64> [[TMP0]], [[SPLAT_SPLAT_I]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[SHR_I]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
 //
-v128_t test_u64x2_shr(v128_t a, int32_t b) {
+v128_t test_u64x2_shr(v128_t a, uint32_t b) {
   return wasm_u64x2_shr(a, b);
 }
 
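The parameter change from int32_t to uint32_t is what flips the widening of the splatted 64-bit shift amount from sext to zext: converting an unsigned 32-bit value to the 64-bit lane type is a zero-extension. A minimal usage sketch of the pattern these tests exercise, assuming only the updated intrinsic signatures shown in this diff (the helper name below is illustrative, not from the source):

#include <wasm_simd128.h>
#include <stdint.h>

// Shift every 64-bit lane of v left by amt.
// amt is uint32_t, so widening it to i64 for the lane splat is a
// zero-extension (zext), matching the updated CHECK lines above.
v128_t shift_lanes_left(v128_t v, uint32_t amt) {
  return wasm_i64x2_shl(v, amt);
}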