@@ -2318,7 +2318,7 @@ pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
 #[doc(alias("i8x16.shl"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
+    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
 }

 #[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -2335,7 +2335,7 @@ pub use i8x16_shl as u8x16_shl;
 #[doc(alias("i8x16.shr_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
+    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
 }

 /// Shifts each lane to the right by the specified number of bits, shifting in
@@ -2349,7 +2349,7 @@ pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
 #[doc(alias("i8x16.shr_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() }
+    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat((amt & 0x7) as u8)).v128() }
 }

 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers.
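The `& 0x7` in the three hunks above mirrors the WebAssembly semantics for `i8x16.shl`/`i8x16.shr_s`/`i8x16.shr_u`: the shift count is interpreted modulo the lane width. Masking before the splat also keeps the count below the lane width, which a plain Rust shift (and the `simd_shl`/`simd_shr` intrinsics used here) requires to avoid overflow. A minimal scalar sketch of the intended per-lane behaviour, as an illustration of the semantics rather than the actual intrinsic implementation:

```rust
/// Scalar model of one 8-bit lane of `i8x16_shl`: the count is reduced
/// modulo the lane width before shifting, so `amt = 9` behaves like `amt = 1`.
fn lane_shl_i8(lane: i8, amt: u32) -> i8 {
    lane << (amt & 0x7) // mask keeps the shift count in 0..=7
}

fn main() {
    assert_eq!(lane_shl_i8(1, 1), 2);
    assert_eq!(lane_shl_i8(1, 9), 2); // 9 & 0x7 == 1
    println!("lane model ok");
}
```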
@@ -2686,7 +2686,7 @@ pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
 #[doc(alias("i16x8.shl"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
+    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
 }

 #[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -2703,7 +2703,7 @@ pub use i16x8_shl as u16x8_shl;
 #[doc(alias("i16x8.shr_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
+    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
 }

 /// Shifts each lane to the right by the specified number of bits, shifting in
@@ -2717,7 +2717,7 @@ pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
 #[doc(alias("i16x8.shr_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() }
+    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat((amt & 0xf) as u16)).v128() }
 }

 /// Adds two 128-bit vectors as if they were two packed eight 16-bit integers.
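The 16-bit shifts follow the same pattern; only the mask changes. In each group of functions the mask is `lane_bits - 1`, i.e. `0x7`, `0xf`, `0x1f`, and `0x3f` for 8-, 16-, 32-, and 64-bit lanes, which for a power-of-two width is the same as taking the count modulo the width. A small sketch making that relationship explicit; the `shift_mask` helper is illustrative only and not part of the crate:

```rust
/// Hypothetical helper: for a lane of `lane_bits` bits (a power of two),
/// `count & (lane_bits - 1)` equals `count % lane_bits`.
const fn shift_mask(lane_bits: u32) -> u32 {
    lane_bits - 1
}

fn main() {
    assert_eq!(shift_mask(8), 0x7);
    assert_eq!(shift_mask(16), 0xf);
    assert_eq!(shift_mask(32), 0x1f);
    assert_eq!(shift_mask(64), 0x3f);
    assert_eq!(20 & shift_mask(16), 20 % 16);
}
```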
@@ -3136,7 +3136,7 @@ pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
 #[doc(alias("i32x4.shl"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
+    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
 }

 #[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -3153,7 +3153,7 @@ pub use i32x4_shl as u32x4_shl;
 #[doc(alias("i32x4.shr_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
+    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
 }

 /// Shifts each lane to the right by the specified number of bits, shifting in
@@ -3167,7 +3167,7 @@ pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
 #[doc(alias("i32x4.shr_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt)).v128() }
+    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt & 0x1f)).v128() }
 }

 /// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
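At the public API level, the masking makes out-of-range counts wrap instead of producing an unspecified result: with `& 0x1f`, a count of 33 on 32-bit lanes behaves like a count of 1. A usage sketch, assuming the code is compiled for a `wasm32` target with the `simd128` target feature enabled:

```rust
use core::arch::wasm32::*;

fn demo() {
    let v = u32x4_splat(0x8000_0000);
    // 33 & 0x1f == 1, so shifting by 33 now matches shifting by 1.
    let a = u32x4_shr(v, 33);
    let b = u32x4_shr(v, 1);
    assert_eq!(u32x4_extract_lane::<0>(a), u32x4_extract_lane::<0>(b));
    assert_eq!(u32x4_extract_lane::<0>(b), 0x4000_0000);
}
```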
@@ -3502,7 +3502,7 @@ pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
 #[doc(alias("i64x2.shl"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
+    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
 }

 #[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -3519,7 +3519,7 @@ pub use i64x2_shl as u64x2_shl;
 #[doc(alias("i64x2.shr_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
+    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
 }

 /// Shifts each lane to the right by the specified number of bits, shifting in
@@ -3533,7 +3533,7 @@ pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
 #[doc(alias("i64x2.shr_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
-    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() }
+    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat((amt & 0x3f) as u64)).v128() }
 }

 /// Adds two 128-bit vectors as if they were two packed two 64-bit integers.
@@ -4344,9 +4344,7 @@ mod tests {
         };
         assert_eq!(
             bytes,
-            [
-                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16
-            ]
+            [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]
         );
     }
