/*
   +----------------------------------------------------------------------+
   | Copyright (c) The PHP Group                                          |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,     |
   | that is bundled with this package in the file LICENSE, and is       |
   | available through the world-wide-web at the following url:          |
   | https://www.php.net/license/3_01.txt                                |
   | If you did not receive a copy of the PHP license and are unable to  |
   | obtain it through the world-wide-web, please send a note to         |
   | license@php.net so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Saki Takamachi <saki@php.net>                              |
   +----------------------------------------------------------------------+
*/


#ifndef _BCMATH_SIMD_H_
#define _BCMATH_SIMD_H_

#ifdef __SSE2__
	/* x86/x86-64 with SSE2: map the generic 16-byte-lane ops onto SSE2 intrinsics. */
# include <emmintrin.h>
 typedef __m128i bc_simd_128_t;
# define HAVE_BC_SIMD_128
# define bc_simd_set_8x16(x) _mm_set1_epi8(x)
# define bc_simd_load_8x16(ptr) _mm_loadu_si128((const __m128i *) (ptr))
# define bc_simd_xor_8x16(a, b) _mm_xor_si128(a, b)
# define bc_simd_store_8x16(ptr, val) _mm_storeu_si128((__m128i *) (ptr), val)
# define bc_simd_add_8x16(a, b) _mm_add_epi8(a, b)
# define bc_simd_cmpeq_8x16(a, b) _mm_cmpeq_epi8(a, b)
# define bc_simd_cmplt_8x16(a, b) _mm_cmplt_epi8(a, b)
# define bc_simd_movemask_8x16(a) _mm_movemask_epi8(a)

#elif defined(__aarch64__) || defined(_M_ARM64)
	/* AArch64 with NEON: the same ops on int8x16_t vectors. */
# include <arm_neon.h>
 typedef int8x16_t bc_simd_128_t;
# define HAVE_BC_SIMD_128
# define bc_simd_set_8x16(x) vdupq_n_s8(x)
# define bc_simd_load_8x16(ptr) vld1q_s8((const int8_t *) (ptr))
# define bc_simd_xor_8x16(a, b) veorq_s8(a, b)
# define bc_simd_store_8x16(ptr, val) vst1q_s8((int8_t *) (ptr), val)
# define bc_simd_add_8x16(a, b) vaddq_s8(a, b)
	/* NEON compares return unsigned masks; reinterpret back to the signed lane type. */
# define bc_simd_cmpeq_8x16(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b)))
# define bc_simd_cmplt_8x16(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b)))
 /*
  * Emulates SSE2's _mm_movemask_epi8 on NEON: collects the most significant
  * bit of each of the 16 byte lanes into a 16-bit integer (lane 0 -> bit 0).
  * Based on code from
  * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon
  */
 static inline int bc_simd_movemask_8x16(int8x16_t vec)
 {
	/* Shift each byte right by 7 so only its sign bit remains, in bit 0. */
	uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(vec), 7));
	/* Three shift-right-and-accumulate steps merge neighboring lanes' bits:
	 * 2 bits per 16-bit lane, then 4 per 32-bit lane, then 8 per 64-bit lane. */
	uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
	uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
	uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
	/* The low byte of each 64-bit half now holds 8 mask bits; combine them. */
	return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
 }
#endif

#endif /* _BCMATH_SIMD_H_ */
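
To make the abstraction concrete, here is a minimal usage sketch, not part of the header itself; the "simd.h" include path, the sample digits, and the main() harness are illustrative assumptions. It converts 16 ASCII digits to numeric values with a single XOR, then uses compare-plus-movemask to test all 16 lanes at once.

#include <stdio.h>

#include "simd.h" /* assumed include path for the header above */

int main(void)
{
#ifdef HAVE_BC_SIMD_128
	/* Exactly 16 digit characters; no NUL terminator is needed because
	 * only these 16 bytes are ever loaded. */
	const char digits[16] = "0123456789012345";
	char values[16];

	/* '0'..'9' are 0x30..0x39, so XOR-ing with 0x30 ('0') flips each
	 * byte between its ASCII character and its numeric value. */
	bc_simd_128_t vec = bc_simd_load_8x16(digits);
	vec = bc_simd_xor_8x16(vec, bc_simd_set_8x16(0x30));
	bc_simd_store_8x16(values, vec);

	/* cmpeq sets matching lanes to all-ones; movemask packs each lane's
	 * top bit into a 16-bit integer, so 0xFFFF means "all 16 matched". */
	bc_simd_128_t zeros = bc_simd_cmpeq_8x16(vec, bc_simd_set_8x16(0));
	if (bc_simd_movemask_8x16(zeros) == 0xFFFF) {
		printf("all lanes are zero digits\n");
	} else {
		printf("first digit value: %d\n", values[0]);
	}
#endif
	return 0;
}

Since the sample digits are not all '0', the mask is not 0xFFFF and the sketch prints the first converted value, 0. The same cmpeq/cmplt-plus-movemask pattern is what makes a portable movemask worth hand-rolling on NEON, which has no direct equivalent of _mm_movemask_epi8.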