/external/clang/test/CodeGen/ |
D | avx512vlbw-builtins.c |
      22  __mmask16 test_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) {   in test_mm_cmpeq_epi8_mask()
      25  return (__mmask16)_mm_cmpeq_epi8_mask(__a, __b);   in test_mm_cmpeq_epi8_mask()
      28  __mmask16 test_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {   in test_mm_mask_cmpeq_epi8_mask()
      32  return (__mmask16)_mm_mask_cmpeq_epi8_mask(__u, __a, __b);   in test_mm_mask_cmpeq_epi8_mask()
      35  __mmask16 test_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) {   in test_mm256_cmpeq_epi16_mask()
      38  return (__mmask16)_mm256_cmpeq_epi16_mask(__a, __b);   in test_mm256_cmpeq_epi16_mask()
      41  __mmask16 test_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {   in test_mm256_mask_cmpeq_epi16_mask()
      45  return (__mmask16)_mm256_mask_cmpeq_epi16_mask(__u, __a, __b);   in test_mm256_mask_cmpeq_epi16_mask()
      74  __mmask16 test_mm_cmpgt_epi8_mask(__m128i __a, __m128i __b) {   in test_mm_cmpgt_epi8_mask()
      77  return (__mmask16)_mm_cmpgt_epi8_mask(__a, __b);   in test_mm_cmpgt_epi8_mask()
      [all …]
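These tests exercise the AVX-512VL/BW byte and word compares, which return a __mmask16 with one bit per lane. A minimal usage sketch of the pattern, not taken from the test file (assumes a compiler with -mavx512vl -mavx512bw and AVX-512 hardware):

    #include <immintrin.h>
    #include <stdio.h>

    /* Compare 16 bytes for equality; each set bit of the __mmask16 marks a
     * matching lane.  The _mask_ variant additionally ANDs in a caller mask. */
    int main(void) {
        __m128i a = _mm_set1_epi8(7);
        __m128i b = _mm_setr_epi8(7, 0, 7, 0, 7, 0, 7, 0, 7, 0, 7, 0, 7, 0, 7, 0);

        __mmask16 eq   = _mm_cmpeq_epi8_mask(a, b);              /* 0x5555 */
        __mmask16 eqlo = _mm_mask_cmpeq_epi8_mask(0x00FF, a, b); /* 0x0055 */

        printf("%04x %04x\n", (unsigned)eq, (unsigned)eqlo);
        return 0;
    }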
|
D | avx512f-builtins.c |
      57  __m512 test_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)   in test_mm512_mask_sqrt_ps()
      64  __m512 test_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)   in test_mm512_maskz_sqrt_ps()
      71  __m512 test_mm512_mask_sqrt_round_ps(__m512 __W,__mmask16 __U,__m512 __A)   in test_mm512_mask_sqrt_round_ps()
      78  __m512 test_mm512_maskz_sqrt_round_ps(__mmask16 __U,__m512 __A)   in test_mm512_maskz_sqrt_round_ps()
      120  __m512 test_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)   in test_mm512_mask_rsqrt14_ps()
      127  __m512 test_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)   in test_mm512_maskz_rsqrt14_ps()
      186  void test_mm512_mask_store_ps(void *p, __m512 a, __mmask16 m)   in test_mm512_mask_store_ps()
      244  void test_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) {   in test_mm512_mask_storeu_epi32()
      263  __m512i test_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void *__P)   in test_mm512_mask_loadu_epi32()
      284  __m512 test_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void *__P)   in test_mm512_mask_loadu_ps()
      [all …]
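The pattern throughout avx512f-builtins.c is uniform: _mask_ variants merge results into a destination vector under a __mmask16, _maskz_ variants zero the unselected lanes. A hedged sketch of the two forms, not from the test file (assumes -mavx512f):

    #include <immintrin.h>

    /* Merge-masking vs. zero-masking with a 16-bit lane mask. */
    void sqrt_masked(float *dst, const float *src, __mmask16 k) {
        __m512 a = _mm512_loadu_ps(src);
        __m512 w = _mm512_set1_ps(-1.0f);             /* merge source          */

        __m512 merged = _mm512_mask_sqrt_ps(w, k, a); /* lane i: k? sqrt(a): w */
        __m512 zeroed = _mm512_maskz_sqrt_ps(k, a);   /* lane i: k? sqrt(a): 0 */

        _mm512_storeu_ps(dst,      merged);
        _mm512_storeu_ps(dst + 16, zeroed);
    }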
|
D | avx512er-builtins.c |
      50  __m512 test_mm512_mask_rsqrt28_round_ps(__m512 s, __mmask16 m, __m512 a) {   in test_mm512_mask_rsqrt28_round_ps()
      56  __m512 test_mm512_maskz_rsqrt28_round_ps(__mmask16 m, __m512 a) {   in test_mm512_maskz_rsqrt28_round_ps()
      68  __m512 test_mm512_mask_rsqrt28_ps(__m512 s, __mmask16 m, __m512 a) {   in test_mm512_mask_rsqrt28_ps()
      74  __m512 test_mm512_maskz_rsqrt28_ps(__mmask16 m, __m512 a) {   in test_mm512_maskz_rsqrt28_ps()
      86  __m128 test_mm_mask_rsqrt28_round_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) {   in test_mm_mask_rsqrt28_round_ss()
      92  __m128 test_mm_maskz_rsqrt28_round_ss(__mmask16 m, __m128 a, __m128 b) {   in test_mm_maskz_rsqrt28_round_ss()
      104  __m128 test_mm_mask_rsqrt28_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) {   in test_mm_mask_rsqrt28_ss()
      110  __m128 test_mm_maskz_rsqrt28_ss(__mmask16 m, __m128 a, __m128 b) {   in test_mm_maskz_rsqrt28_ss()
      176  __m512 test_mm512_mask_rcp28_round_ps(__m512 s, __mmask16 m, __m512 a) {   in test_mm512_mask_rcp28_round_ps()
      182  __m512 test_mm512_maskz_rcp28_round_ps(__mmask16 m, __m512 a) {   in test_mm512_maskz_rcp28_round_ps()
      [all …]
|
D | avx512dq-builtins.c |
      50  __m512 test_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_mask_xor_ps()
      56  __m512 test_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_maskz_xor_ps()
      86  __m512 test_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_mask_or_ps()
      92  __m512 test_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_maskz_or_ps()
      122  __m512 test_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_mask_and_ps()
      128  __m512 test_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_maskz_and_ps()
      158  __m512 test_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_mask_andnot_ps()
      164  __m512 test_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_maskz_andnot_ps()
      716  __m512 test_mm512_mask_range_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_mask_range_ps()
      722  __m512 test_mm512_maskz_range_ps(__mmask16 __U, __m512 __A, __m512 __B) {   in test_mm512_maskz_range_ps()
      [all …]
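AVX-512DQ adds float/double forms of the bitwise ops; the masked variants again take a __mmask16 for the 16 single-precision lanes. A small sketch of merge-masked xor, not from the test file (assumes -mavx512dq):

    #include <immintrin.h>

    /* Flip the sign of the lanes selected by k; unselected lanes keep v. */
    static __m512 negate_selected(__m512 v, __mmask16 k) {
        const __m512 signbit = _mm512_set1_ps(-0.0f);
        return _mm512_mask_xor_ps(v, k, v, signbit);  /* merge-masked xor */
    }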
|
D | avx512vbmivl-builtin.c |
      14  __m128i test_mm_maskz_permutexvar_epi8(__mmask16 __M, __m128i __A, __m128i __B) {   in test_mm_maskz_permutexvar_epi8()
      20  __m128i test_mm_mask_permutexvar_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {   in test_mm_mask_permutexvar_epi8()
      44  __m128i test_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U, __m128i __B) {   in test_mm_mask2_permutex2var_epi8()
      62  __m128i test_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I, __m128i __B) {   in test_mm_mask_permutex2var_epi8()
      68  __m128i test_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I, __m128i __B) {   in test_mm_maskz_permutex2var_epi8()
      92  __m128i test_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y) {   in test_mm_mask_multishift_epi64_epi8()
      98  __m128i test_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y) {   in test_mm_maskz_multishift_epi64_epi8()
|
D | avx512cdintrin.c |
      28  __m512i test_mm512_mask_conflict_epi32(__m512i __W, __mmask16 __U, __m512i __A) {   in test_mm512_mask_conflict_epi32()
      33  __m512i test_mm512_maskz_conflict_epi32(__mmask16 __U, __m512i __A) {   in test_mm512_maskz_conflict_epi32()
      43  __m512i test_mm512_mask_lzcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) {   in test_mm512_mask_lzcnt_epi32()
      49  __m512i test_mm512_maskz_lzcnt_epi32(__mmask16 __U, __m512i __A) {   in test_mm512_maskz_lzcnt_epi32()
      79  __m512i test_mm512_broadcastmw_epi32(__mmask16 __A) {   in test_mm512_broadcastmw_epi32()
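The AVX-512CD intrinsics (conflict detection, leading-zero count) follow the same masking convention: 16 dword lanes, so the mask parameter is a __mmask16. A hedged sketch, not from the test file (assumes -mavx512cd):

    #include <immintrin.h>

    /* Zero-masked leading-zero count: dword lanes not selected by the mask
     * come back as 0. */
    static __m512i lzcnt_low_half(__m512i v) {
        const __mmask16 low8 = 0x00FF;            /* select lanes 0..7 */
        return _mm512_maskz_lzcnt_epi32(low8, v);
    }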
|
D | avx512pf-builtins.c |
      20  void test_mm512_mask_prefetch_i32gather_ps(__m512i index, __mmask16 mask, void const *addr, int hin…   in test_mm512_mask_prefetch_i32gather_ps()
      74  void test_mm512_mask_prefetch_i32scatter_ps(void *addr, __mmask16 mask, __m512i index) {   in test_mm512_mask_prefetch_i32scatter_ps()
      86  void test_mm512_mask_prefetch_i64scatter_pd(void *addr, __mmask16 mask, __m512i index) {   in test_mm512_mask_prefetch_i64scatter_pd()
      98  void test_mm512_mask_prefetch_i64scatter_ps(void *addr, __mmask16 mask, __m512i index) {   in test_mm512_mask_prefetch_i64scatter_ps()
|
D | avx512vlcd-builtins.c |
      20  __m128i test_mm_broadcastmw_epi32(__mmask16 __A) {   in test_mm_broadcastmw_epi32()
      26  __m256i test_mm256_broadcastmw_epi32(__mmask16 __A) {   in test_mm256_broadcastmw_epi32()
|
/external/clang/lib/Headers/ |
D | avx512vlbwintrin.h |
      41  static __inline__ __mmask16 __DEFAULT_FN_ATTRS
      43  return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,   in _mm_cmpeq_epi8_mask()
      44  (__mmask16)-1);   in _mm_cmpeq_epi8_mask()
      47  static __inline__ __mmask16 __DEFAULT_FN_ATTRS
      48  _mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {   in _mm_mask_cmpeq_epi8_mask()
      49  return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,   in _mm_mask_cmpeq_epi8_mask()
      53  static __inline__ __mmask16 __DEFAULT_FN_ATTRS
      55  return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,   in _mm_cmpeq_epu8_mask()
      56  (__mmask16)-1);   in _mm_cmpeq_epu8_mask()
      59  static __inline__ __mmask16 __DEFAULT_FN_ATTRS
      [all …]
|
D | avx512fintrin.h |
      48  typedef unsigned short __mmask16;   typedef
      208  _mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)   in _mm512_mask_broadcastd_epi32()
      216  _mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)   in _mm512_maskz_broadcastd_epi32()
      249  _mm512_maskz_set1_epi32(__mmask16 __M, int __A)   in _mm512_maskz_set1_epi32()
      514  _mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)   in _mm512_mask_and_epi32()
      516  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,   in _mm512_mask_and_epi32()
      522  _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)   in _mm512_maskz_and_epi32()
      562  _mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)   in _mm512_mask_andnot_epi32()
      564  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,   in _mm512_mask_andnot_epi32()
      570  _mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)   in _mm512_maskz_andnot_epi32()
      [all …]
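Line 48 above is the definition everything else relies on: __mmask16 is a plain 16-bit unsigned integer, so masks can be built and combined with ordinary integer arithmetic before being passed to an intrinsic. A hedged illustration, not from the header (assumes -mavx512f):

    #include <immintrin.h>

    /* Because __mmask16 is just an unsigned short, ordinary shifts and
     * subtraction are enough to build a "first n lanes" mask. */
    static __m512i first_n_lanes_set1(int n, int value) {
        __mmask16 k = (__mmask16)((1u << n) - 1u);  /* lanes 0..n-1, n in [0,16] */
        return _mm512_maskz_set1_epi32(k, value);   /* selected lanes = value, rest = 0 */
    }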
|
D | avx512vbmivlintrin.h |
      36  _mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U,   in _mm_mask2_permutex2var_epi8()
      43  (__mmask16)   in _mm_mask2_permutex2var_epi8()
      66  (__mmask16) -   in _mm_permutex2var_epi8()
      71  _mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I,   in _mm_mask_permutex2var_epi8()
      78  (__mmask16)   in _mm_mask_permutex2var_epi8()
      83  _mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I,   in _mm_maskz_permutex2var_epi8()
      90  (__mmask16)   in _mm_maskz_permutex2var_epi8()
      135  (__mmask16) -1);   in _mm_permutexvar_epi8()
      139  _mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B)   in _mm_maskz_permutexvar_epi8()
      144  (__mmask16) __M);   in _mm_maskz_permutexvar_epi8()
      [all …]
|
D | avx512cdintrin.h |
      63  (__mmask16) -1);   in _mm512_conflict_epi32()
      67  _mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A)   in _mm512_mask_conflict_epi32()
      71  (__mmask16) __U);   in _mm512_mask_conflict_epi32()
      75  _mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)   in _mm512_maskz_conflict_epi32()
      79  (__mmask16) __U);   in _mm512_maskz_conflict_epi32()
      87  (__mmask16) -1);   in _mm512_lzcnt_epi32()
      91  _mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A)   in _mm512_mask_lzcnt_epi32()
      95  (__mmask16) __U);   in _mm512_mask_lzcnt_epi32()
      99  _mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A)   in _mm512_maskz_lzcnt_epi32()
      103  (__mmask16) __U);   in _mm512_maskz_lzcnt_epi32()
      [all …]
|
D | avx512dqintrin.h |
      84  _mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in _mm512_mask_xor_ps()
      88  (__mmask16) __U);   in _mm512_mask_xor_ps()
      92  _mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {   in _mm512_maskz_xor_ps()
      97  (__mmask16) __U);   in _mm512_maskz_xor_ps()
      128  _mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in _mm512_mask_or_ps()
      132  (__mmask16) __U);   in _mm512_mask_or_ps()
      136  _mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {   in _mm512_maskz_or_ps()
      141  (__mmask16) __U);   in _mm512_maskz_or_ps()
      172  _mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {   in _mm512_mask_and_ps()
      176  (__mmask16) __U);   in _mm512_mask_and_ps()
      [all …]
|
D | avx512erintrin.h |
      58  (__mmask16)-1, (int)(R)); })
      62  (__v16sf)(__m512)(S), (__mmask16)(M), \
      68  (__mmask16)(M), (int)(R)); })
      107  (__mmask16)-1, (int)(R)); })
      111  (__v16sf)(__m512)(S), (__mmask16)(M), \
      117  (__mmask16)(M), (int)(R)); })
      210  (__mmask16)-1, (int)(R)); })
      214  (__v16sf)(__m512)(S), (__mmask16)(M), \
      220  (__mmask16)(M), (int)(R)); })
|
D | avx512pfintrin.h |
      45  __builtin_ia32_gatherpfdps((__mmask16)(mask), \
      50  __builtin_ia32_gatherpfdps((__mmask16) -1, \
      83  __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
      87  __builtin_ia32_scatterpfdps((__mmask16)(mask), \
|
D | avx512vlcdintrin.h |
      47  _mm_broadcastmw_epi32 (__mmask16 __A)   in _mm_broadcastmw_epi32()
      53  _mm256_broadcastmw_epi32 (__mmask16 __A)   in _mm256_broadcastmw_epi32()
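broadcastmw_epi32 goes the other direction: it materializes a __mmask16 value into every 32-bit lane of a vector. A trivial sketch, not from the header (assumes -mavx512cd -mavx512vl):

    #include <immintrin.h>

    /* Copy the 16-bit mask value into each dword lane of an XMM register. */
    static __m128i mask_to_vector(__mmask16 k) {
        return _mm_broadcastmw_epi32(k);
    }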
|
D | avx512bwintrin.h |
      1314  (__mmask16) -1);   in _mm512_madd_epi16()
      1318  _mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,   in _mm512_mask_madd_epi16()
      1323  (__mmask16) __U);   in _mm512_mask_madd_epi16()
      1327  _mm512_maskz_madd_epi16 (__m512i __U, __m512i __A, __m512i __B) {   in _mm512_maskz_madd_epi16()
      1331  (__mmask16) __U);   in _mm512_maskz_madd_epi16()
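Note why a BW intrinsic shows up in a __mmask16 search: _mm512_madd_epi16 reduces 32 word lanes to 16 dword lanes, so its masked forms take a __mmask16 rather than a __mmask32. A short sketch, not from the header (assumes -mavx512bw):

    #include <immintrin.h>

    /* Multiply adjacent 16-bit pairs and sum them; zero the dword lanes
     * that are not selected by k. */
    static __m512i dot_pairs_masked(__m512i a, __m512i b, __mmask16 k) {
        return _mm512_maskz_madd_epi16(k, a, b);
    }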
|
/external/mesa3d/src/gallium/drivers/swr/rasterizer/common/ |
D | simdlib_128_avx512.inl |
      50  #define SIMD_WRAPPER_1(op) SIMD_WRAPPER_1_(op, op, __mmask16(0xf))
      58  #define SIMD_WRAPPER_1I(op) SIMD_WRAPPER_1I_(op, op, __mmask16(0xf))
      65  #define SIMD_WRAPPER_2(op) SIMD_WRAPPER_2_(op, op, __mmask16(0xf))
      79  #define SIMD_WRAPPER_3(op) SIMD_WRAPPER_3_(op, op, __mmask16(0xf))
      93  #define SIMD_IWRAPPER_1_32(op) SIMD_IWRAPPER_1_(op, op, __mmask16(0xf))
      101  #define SIMD_IWRAPPER_1I_32(op) SIMD_IWRAPPER_1I_(op, op, __mmask16(0xf))
      108  #define SIMD_IWRAPPER_2_32(op) SIMD_IWRAPPER_2_(op, op, __mmask16(0xf))
      127  SIMD_WRAPPER_1_(rcp_ps, rcp14_ps, __mmask16(0xf)); // return 1.0f / a
      128  SIMD_WRAPPER_1_(rsqrt_ps, rsqrt14_ps, __mmask16(0xf)); // return 1.0f / sqrt(a)
      158  SIMD_IWRAPPER_2_(and_si, and_epi32, __mmask16(0xf)); // return a & b (int)
      [all …]
|
D | simdlib_256_avx512.inl |
      50  #define SIMD_WRAPPER_1(op) SIMD_WRAPPER_1_(op, op, __mmask16(0xff))
      58  #define SIMD_WRAPPER_1I(op) SIMD_WRAPPER_1I_(op, op, __mmask16(0xff))
      65  #define SIMD_WRAPPER_2(op) SIMD_WRAPPER_2_(op, op, __mmask16(0xff))
      79  #define SIMD_WRAPPER_3(op) SIMD_WRAPPER_3_(op, op, __mmask16(0xff))
      93  #define SIMD_IWRAPPER_1_32(op) SIMD_IWRAPPER_1_(op, op, __mmask16(0xff))
      101  #define SIMD_IWRAPPER_1I_32(op) SIMD_IWRAPPER_1I_(op, op, __mmask16(0xff))
      108  #define SIMD_IWRAPPER_2_32(op) SIMD_IWRAPPER_2_(op, op, __mmask16(0xff))
      127  SIMD_WRAPPER_1_(rcp_ps, rcp14_ps, __mmask16(0xff)); // return 1.0f / a
      128  SIMD_WRAPPER_1_(rsqrt_ps, rsqrt14_ps, __mmask16(0xff)); // return 1.0f / sqrt(a)
      158  SIMD_IWRAPPER_2_(and_si, and_epi32, __mmask16(0xff)); // return a & b (int)
      [all …]
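The swr wrappers appear to run 128- and 256-bit operations on 512-bit registers, using a constant __mmask16 of 0xf or 0xff to restrict the masked intrinsic to the low 4 or 8 dword lanes. A hedged C rendering of that idea outside the wrapper macros, not taken from the library (assumes -mavx512f):

    #include <immintrin.h>

    /* Emulate a 256-bit add on a 512-bit register: only the low 8 float
     * lanes (mask 0xff) are computed, the upper lanes are zeroed. */
    static __m512 add_low8(__m512 a, __m512 b) {
        return _mm512_maskz_add_ps((__mmask16)0xff, a, b);
    }

    /* The 128-bit flavor of the same trick uses mask 0xf (low 4 lanes). */
    static __m512 add_low4(__m512 a, __m512 b) {
        return _mm512_maskz_add_ps((__mmask16)0xf, a, b);
    }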
|
D | simdlib_128_avx512_core.inl |
      41  #define SIMD_WRAPPER_1(op) SIMD_WRAPPER_1_(op, op, __mmask16(0xf))
      49  #define SIMD_WRAPPER_1I(op) SIMD_WRAPPER_1I_(op, op, __mmask16(0xf))
      56  #define SIMD_WRAPPER_2(op) SIMD_WRAPPER_2_(op, op, __mmask16(0xf))
      70  #define SIMD_WRAPPER_3(op) SIMD_WRAPPER_3_(op, op, __mmask16(0xf))
|
D | simdlib_512_avx512.inl |
      156  static SIMDINLINE Integer vmask(__mmask16 m)
      313  __mmask16 result = cmp_ps_mask<CmpTypeT>(a, b);
      328  __mmask16 result = _mm512_cmp_epi32_mask(a, b, static_cast<const int>(CmpTypeT));
      365  return _mm512_mask_blend_ps(__mmask16(ImmT), a, b);
      371  return _mm512_mask_blend_epi32(__mmask16(ImmT), a, b);
      376  return _mm512_mask_blend_ps(__mmask16(movemask_ps(mask)), a, b);
      519  __mmask16 k = _mm512_cmpneq_ps_mask(mask, setzero_ps());
      543  __mmask16 m = _mm512_test_epi32_mask(castps_si(a), set1_epi32(0x80000000));
      638  return castsi_ps(_mm512_maskz_mov_epi32(__mmask16(mask), set1_epi32(-1)));
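Line 638 shows the common trick for turning a __mmask16 back into an all-ones/all-zeros vector mask. A C rendering of the same idea, not from the library (assumes -mavx512f; castsi_ps/set1_epi32 are the library's own helpers and are replaced with intrinsics here):

    #include <immintrin.h>

    /* Expand a 16-bit lane mask into a vector mask: selected dword lanes
     * become 0xFFFFFFFF, unselected lanes become 0. */
    static __m512 mask_to_ps(__mmask16 m) {
        __m512i ones = _mm512_set1_epi32(-1);
        return _mm512_castsi512_ps(_mm512_maskz_mov_epi32(m, ones));
    }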
|
D | simdlib_types.hpp |
      295  using __mmask16 = uint16_t;   typedef
      373  using Mask = __mmask16;
|
/external/eigen/Eigen/src/Core/arch/AVX512/ |
D | MathFunctions.h |
      67  __mmask16 invalid_mask =
      69  __mmask16 iszero_mask =
      90  __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);
      268  __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);
      335  __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
      340  __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
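Eigen builds __mmask16 predicates with _mm512_cmp_ps_mask and then patches the special-case lanes of its math kernels. A hedged sketch of that compare-then-blend pattern, not lifted from Eigen (assumes -mavx512f; the threshold and replacement value are illustrative):

    #include <immintrin.h>
    #include <float.h>

    /* Replace sub-normal or zero inputs with 1.0f before further
     * processing, using a compare mask and a blend. */
    static __m512 clamp_small_to_one(__m512 x) {
        __m512    flt_min = _mm512_set1_ps(FLT_MIN);
        __mmask16 small   = _mm512_cmp_ps_mask(x, flt_min, _CMP_LT_OQ);
        /* blend: lanes with the mask bit set take the second operand */
        return _mm512_mask_blend_ps(small, x, _mm512_set1_ps(1.0f));
    }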
|
/external/clang/test/Sema/ |
D | builtins-x86.c |
      11  typedef unsigned short __mmask16;   typedef
      29  __mmask16 test__builtin_ia32_cmpps512_mask(__m512d __a, __m512d __b) {   in test__builtin_ia32_cmpps512_mask()
|
/external/clang/test/Headers/ |
D | x86intrin-2.c |
      83  __mmask16 __attribute__((__target__("avx512vl,avx512bw"))) mm_cmpeq_epi8_mask_wrap(__m128i a, __m12…   in mm_cmpeq_epi8_mask_wrap()
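The header test wraps each intrinsic in a function carrying a per-function target attribute, so the file compiles without global -mavx512* flags. A minimal sketch of the same pattern (the wrapper name here is hypothetical):

    #include <immintrin.h>

    /* Per-function target attribute: only this wrapper is compiled with
     * AVX512VL+BW enabled; the rest of the translation unit is unaffected. */
    __mmask16 __attribute__((__target__("avx512vl,avx512bw")))
    cmpeq_bytes(__m128i a, __m128i b) {
        return _mm_cmpeq_epi8_mask(a, b);
    }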
|