Searched refs:__mmask64 (Results 1 – 25 of 33) sorted by relevance

/external/clang/test/CodeGen/
avx512bw-builtins.c
9 __mmask64 test_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi8_mask()
12 return (__mmask64)_mm512_cmpeq_epi8_mask(__a, __b); in test_mm512_cmpeq_epi8_mask()
15 __mmask64 test_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi8_mask()
19 return (__mmask64)_mm512_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi8_mask()
35 __mmask64 test_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpgt_epi8_mask()
38 return (__mmask64)_mm512_cmpgt_epi8_mask(__a, __b); in test_mm512_cmpgt_epi8_mask()
41 __mmask64 test_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpgt_epi8_mask()
45 return (__mmask64)_mm512_mask_cmpgt_epi8_mask(__u, __a, __b); in test_mm512_mask_cmpgt_epi8_mask()
61 __mmask64 test_mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epu8_mask()
64 return (__mmask64)_mm512_cmpeq_epu8_mask(__a, __b); in test_mm512_cmpeq_epu8_mask()
[all …]
avx512vbmi-builtins.c
8 __m512i test_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, __m512i __B) { in test_mm512_mask2_permutex2var_epi8()
20 __m512i test_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, __m512i __B) { in test_mm512_mask_permutex2var_epi8()
26 __m512i test_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, __m512i __B) { in test_mm512_maskz_permutex2var_epi8()
38 __m512i test_mm512_maskz_permutexvar_epi8(__mmask64 __M, __m512i __A, __m512i __B) { in test_mm512_maskz_permutexvar_epi8()
44 __m512i test_mm512_mask_permutexvar_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { in test_mm512_mask_permutexvar_epi8()
50 __m512i test_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y)… in test_mm512_mask_multishift_epi64_epi8()
56 __m512i test_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) { in test_mm512_maskz_multishift_epi64_epi8()
/external/clang/lib/Headers/
avx512vbmiintrin.h
37 __mmask64 __U, __m512i __B) in _mm512_mask2_permutex2var_epi8()
43 (__mmask64) __U); in _mm512_mask2_permutex2var_epi8()
53 (__mmask64) -1); in _mm512_permutex2var_epi8()
57 _mm512_mask_permutex2var_epi8 (__m512i __A, __mmask64 __U, in _mm512_mask_permutex2var_epi8()
64 (__mmask64) __U); in _mm512_mask_permutex2var_epi8()
68 _mm512_maskz_permutex2var_epi8 (__mmask64 __U, __m512i __A, in _mm512_maskz_permutex2var_epi8()
75 (__mmask64) __U); in _mm512_maskz_permutex2var_epi8()
84 (__mmask64) -1); in _mm512_permutexvar_epi8()
88 _mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A, in _mm512_maskz_permutexvar_epi8()
94 (__mmask64) __M); in _mm512_maskz_permutexvar_epi8()
[all …]
avx512bwintrin.h
32 typedef unsigned long long __mmask64; typedef
59 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
61 return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b, in _mm512_cmpeq_epi8_mask()
62 (__mmask64)-1); in _mm512_cmpeq_epi8_mask()
65 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
66 _mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epi8_mask()
67 return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b, in _mm512_mask_cmpeq_epi8_mask()
71 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
73 return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0, in _mm512_cmpeq_epu8_mask()
74 (__mmask64)-1); in _mm512_cmpeq_epu8_mask()
[all …]
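
The avx512bwintrin.h lines above show that __mmask64 is a plain unsigned long long and that the byte compares return one mask bit per lane. As a hedged illustration only (not code from these results; assumes an AVX-512BW-capable compiler, e.g. clang with -mavx512bw), counting matching bytes in a 64-byte block looks like this:

// Illustrative sketch, not from the indexed sources.
// Counts how many of 64 bytes equal `value`; assumes AVX-512BW (-mavx512bw).
#include <immintrin.h>
#include <stddef.h>

static size_t count_byte(const unsigned char block[64], unsigned char value)
{
    __m512i data   = _mm512_loadu_si512(block);             // load 64 bytes
    __m512i needle = _mm512_set1_epi8((char)value);         // broadcast the byte
    __mmask64 eq   = _mm512_cmpeq_epi8_mask(data, needle);  // one bit per equal byte
    // __mmask64 is an unsigned long long, so an ordinary popcount finishes the job.
    return (size_t)__builtin_popcountll((unsigned long long)eq);
}
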
/external/llvm-project/clang/lib/Headers/
avx512bwintrin.h
18 typedef unsigned long long __mmask64; typedef
30 static __inline __mmask64 __DEFAULT_FN_ATTRS
31 _knot_mask64(__mmask64 __M) in _knot_mask64()
42 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
43 _kand_mask64(__mmask64 __A, __mmask64 __B) in _kand_mask64()
45 return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B); in _kand_mask64()
54 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
55 _kandn_mask64(__mmask64 __A, __mmask64 __B) in _kandn_mask64()
57 return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B); in _kandn_mask64()
66 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
[all …]
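
The newer header also exposes k-register operations on 64-bit masks (_knot_mask64, _kand_mask64, _kandn_mask64, ...). A minimal sketch of how they compose, assuming AVX-512BW support (not taken from the header):

// Illustrative sketch: mask of bytes equal to `a` but not equal to `b`.
// Assumes AVX-512BW mask intrinsics; compile with -mavx512bw.
#include <immintrin.h>

static __mmask64 eq_a_not_b(__m512i data, char a, char b)
{
    __mmask64 ka = _mm512_cmpeq_epi8_mask(data, _mm512_set1_epi8(a));
    __mmask64 kb = _mm512_cmpeq_epi8_mask(data, _mm512_set1_epi8(b));
    // _kandn_mask64(x, y) computes (~x) & y, mirroring the KANDNQ instruction.
    return _kandn_mask64(kb, ka);
}
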
avx512vbmiintrin.h
29 _mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, in _mm512_mask_permutex2var_epi8()
38 _mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, in _mm512_mask2_permutex2var_epi8()
47 _mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, in _mm512_maskz_permutex2var_epi8()
62 _mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A, in _mm512_maskz_permutexvar_epi8()
65 return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, in _mm512_maskz_permutexvar_epi8()
71 _mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A, in _mm512_mask_permutexvar_epi8()
74 return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, in _mm512_mask_permutexvar_epi8()
86 _mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, in _mm512_mask_multishift_epi64_epi8()
89 return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, in _mm512_mask_multishift_epi64_epi8()
95 _mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) in _mm512_maskz_multishift_epi64_epi8()
[all …]
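
_mm512_permutexvar_epi8 and its masked/zeroed forms shuffle all 64 byte lanes through an index vector, with __mmask64 selecting which result lanes are kept. A hedged sketch of the unmasked form, assuming AVX-512VBMI is available (the byte-reverse use case is illustrative):

// Illustrative sketch: reverse the 64 bytes of a vector with a full byte permute.
// Assumes AVX-512VBMI (vpermb); compile with -mavx512vbmi -mavx512bw.
#include <immintrin.h>

static __m512i reverse_bytes(__m512i v)
{
    unsigned char idx[64];
    for (int i = 0; i < 64; ++i)
        idx[i] = (unsigned char)(63 - i);      // source lane for destination byte i
    __m512i perm = _mm512_loadu_si512(idx);
    return _mm512_permutexvar_epi8(perm, v);   // index vector first, data second
}
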
avx512bitalgintrin.h
49 _mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B) in _mm512_mask_popcnt_epi8()
51 return (__m512i) __builtin_ia32_selectb_512((__mmask64) __U, in _mm512_mask_popcnt_epi8()
57 _mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) in _mm512_maskz_popcnt_epi8()
64 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
65 _mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B) in _mm512_mask_bitshuffle_epi64_mask()
67 return (__mmask64) __builtin_ia32_vpshufbitqmb512_mask((__v64qi) __A, in _mm512_mask_bitshuffle_epi64_mask()
72 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
75 return _mm512_mask_bitshuffle_epi64_mask((__mmask64) -1, in _mm512_bitshuffle_epi64_mask()
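
avx512bitalgintrin.h pairs __mmask64 with per-byte population counts: the mask picks which lanes receive popcount(__B) and which keep __A. A small sketch under the assumption of AVX-512BITALG support (not from the header):

// Illustrative sketch: popcount only the even byte lanes, keep the odd ones.
// Assumes AVX-512BITALG and AVX-512BW; compile with -mavx512bitalg -mavx512bw.
#include <immintrin.h>

static __m512i popcnt_even_bytes(__m512i v)
{
    __mmask64 even = 0x5555555555555555ULL;      // bits set for lanes 0, 2, 4, ...
    // Lanes with a set mask bit get popcount(v); the others pass `v` through.
    return _mm512_mask_popcnt_epi8(v, even, v);
}
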
avx512vbmi2intrin.h
38 _mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D) in _mm512_mask_compress_epi8()
46 _mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D) in _mm512_maskz_compress_epi8()
61 _mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) in _mm512_mask_compressstoreu_epi8()
84 _mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D) in _mm512_mask_expand_epi8()
92 _mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D) in _mm512_maskz_expand_epi8()
116 _mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const *__P) in _mm512_mask_expandloadu_epi8()
124 _mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P) in _mm512_maskz_expandloadu_epi8()
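
The VBMI2 byte compress/expand intrinsics use a __mmask64 to choose which byte lanes participate, which makes left-packing selected bytes a short operation. A hedged sketch assuming AVX-512VBMI2 support (the non-zero filter is illustrative):

// Illustrative sketch: left-pack the non-zero bytes of `v`; remaining lanes become 0.
// Assumes AVX-512VBMI2 and AVX-512BW; compile with -mavx512vbmi2 -mavx512bw.
#include <immintrin.h>

static __m512i pack_nonzero_bytes(__m512i v, unsigned *count)
{
    __mmask64 nz = _mm512_test_epi8_mask(v, v);   // bit set where the byte is non-zero
    *count = (unsigned)__builtin_popcountll((unsigned long long)nz);
    return _mm512_maskz_compress_epi8(nz, v);     // selected bytes packed toward lane 0
}
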
gfniintrin.h
73 (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
87 (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
103 _mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B) in _mm512_mask_gf2p8mul_epi8()
111 _mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B) in _mm512_maskz_gf2p8mul_epi8()
/external/llvm-project/clang/test/CodeGen/X86/
avx512bw-builtins.c
15 __mmask64 test_knot_mask64(__mmask64 a) { in test_knot_mask64()
34 __mmask64 test_kand_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i… in test_kand_mask64()
57 __mmask64 test_kandn_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512… in test_kandn_mask64()
80 __mmask64 test_kor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i … in test_kor_mask64()
103 __mmask64 test_kxnor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512… in test_kxnor_mask64()
126 __mmask64 test_kxor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i… in test_kxor_mask64()
308 __mmask64 test_kadd_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i… in test_kadd_mask64()
335 __mmask64 test_kshiftli_mask64(__m512i A, __m512i B, __m512i C, __m512i D) { in test_kshiftli_mask64()
343 __mmask64 test_kshiftri_mask64(__m512i A, __m512i B, __m512i C, __m512i D) { in test_kshiftri_mask64()
371 __mmask64 test_cvtu64_mask64(__m512i A, __m512i B, unsigned long long C) { in test_cvtu64_mask64()
[all …]
avx512vbmi-builtins.c
6 __m512i test_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, __m512i __B) { in test_mm512_mask2_permutex2var_epi8()
19 __m512i test_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, __m512i __B) { in test_mm512_mask_permutex2var_epi8()
26 __m512i test_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, __m512i __B) { in test_mm512_maskz_permutex2var_epi8()
39 __m512i test_mm512_maskz_permutexvar_epi8(__mmask64 __M, __m512i __A, __m512i __B) { in test_mm512_maskz_permutexvar_epi8()
46 __m512i test_mm512_mask_permutexvar_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { in test_mm512_mask_permutexvar_epi8()
53 __m512i test_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y)… in test_mm512_mask_multishift_epi64_epi8()
60 __m512i test_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) { in test_mm512_maskz_multishift_epi64_epi8()
avx512bitalg-builtins.c
30 __m512i test_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B) { in test_mm512_mask_popcnt_epi8()
36 __m512i test_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) { in test_mm512_maskz_popcnt_epi8()
43 __mmask64 test_mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B) { in test_mm512_mask_bitshuffle_epi64_mask()
50 __mmask64 test_mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B) { in test_mm512_bitshuffle_epi64_mask()
gfni-builtins.c
52 __m512i test_mm512_mask_gf2p8affineinv_epi64_epi8(__m512i S, __mmask64 U, __m512i A, __m512i B) { in test_mm512_mask_gf2p8affineinv_epi64_epi8()
59 __m512i test_mm512_maskz_gf2p8affineinv_epi64_epi8(__mmask64 U, __m512i A, __m512i B) { in test_mm512_maskz_gf2p8affineinv_epi64_epi8()
100 __m512i test_mm512_mask_gf2p8affine_epi64_epi8(__m512i S, __mmask64 U, __m512i A, __m512i B) { in test_mm512_mask_gf2p8affine_epi64_epi8()
107 __m512i test_mm512_maskz_gf2p8affine_epi64_epi8(__mmask64 U, __m512i A, __m512i B) { in test_mm512_maskz_gf2p8affine_epi64_epi8()
148 __m512i test_mm512_mask_gf2p8mul_epi8(__m512i S, __mmask64 U, __m512i A, __m512i B) { in test_mm512_mask_gf2p8mul_epi8()
155 __m512i test_mm512_maskz_gf2p8mul_epi8(__mmask64 U, __m512i A, __m512i B) { in test_mm512_maskz_gf2p8mul_epi8()
avx512vbmi2-builtins.c
17 __m512i test_mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D) { in test_mm512_mask_compress_epi8()
23 __m512i test_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D) { in test_mm512_maskz_compress_epi8()
35 void test_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) { in test_mm512_mask_compressstoreu_epi8()
53 __m512i test_mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D) { in test_mm512_mask_expand_epi8()
59 __m512i test_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D) { in test_mm512_maskz_expand_epi8()
77 __m512i test_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const* __P) { in test_mm512_mask_expandloadu_epi8()
83 __m512i test_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const* __P) { in test_mm512_maskz_expandloadu_epi8()
/external/XNNPACK/src/xnnpack/
intrinsics-polyfill.h
49 __mmask64 _cvtu64_mask64(unsigned long long mask) { in _cvtu64_mask64()
50 return (__mmask64) mask; in _cvtu64_mask64()
54 __mmask64 _kshiftli_mask64(__mmask64 a, unsigned int count) { in _kshiftli_mask64()
55 return (__mmask64) ((unsigned long long) a << count); in _kshiftli_mask64()
59 __mmask64 _kshiftri_mask64(__mmask64 a, unsigned int count) { in _kshiftri_mask64()
60 return (__mmask64) ((unsigned long long) a >> count); in _kshiftri_mask64()
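
XNNPACK's polyfill works because __mmask64 really is an unsigned long long, so _cvtu64_mask64 and the shift helpers reduce to integer operations on compilers that lack the intrinsics. A hedged sketch of the usual tail-masking pattern built on it (assumes AVX-512BW; not code from the file):

// Illustrative sketch: store only the first n (n <= 64) bytes of a vector.
// Assumes AVX-512BW; older compilers get _cvtu64_mask64 from the polyfill above.
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static void store_first_n(uint8_t *dst, __m512i v, size_t n)
{
    // A shift by 64 is undefined in C, so handle the full-vector case separately.
    uint64_t bits = (n >= 64) ? UINT64_MAX : ((UINT64_C(1) << n) - UINT64_C(1));
    __mmask64 tail = _cvtu64_mask64(bits);
    _mm512_mask_storeu_epi8(dst, tail, v);   // lanes with a clear mask bit are untouched
}
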
/external/mesa3d/src/gallium/drivers/swr/rasterizer/common/
simdlib_256_avx512_core.inl
63 #define SIMD_IWRAPPER_1_8(op) SIMD_IWRAPPER_1_(op, op, __mmask64(0xffffffffull))
73 #define SIMD_IWRAPPER_1I_8(op) SIMD_IWRAPPER_1I_(op, op, __mmask64(0xffffffffull))
82 #define SIMD_IWRAPPER_2_8(op) SIMD_IWRAPPER_2_(op, op, __mmask64(0xffffffffull))
107 __mmask64 m = 0xffffffffull;
simdlib_128_avx512_core.inl
106 #define SIMD_IWRAPPER_1_8(op) SIMD_IWRAPPER_1_(op, op, __mmask64(0xffffull))
116 #define SIMD_IWRAPPER_1I_8(op) SIMD_IWRAPPER_1I_(op, op, __mmask64(0xffffull))
125 #define SIMD_IWRAPPER_2_8(op) SIMD_IWRAPPER_2_(op, op, __mmask64(0xffffull))
157 __mmask64 m = 0xffffull;
simdlib_512_avx512_core.inl
112 static SIMDINLINE Integer vmask(__mmask64 m)
133 __mmask64 result = _mm512_cmp_epi8_mask(a, b, static_cast<const int>(CmpTypeT));
162 __mmask64 m = _mm512_cmplt_epi8_mask(a, setzero_si());
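
The swr wrappers go in both directions: byte compares produce a __mmask64, and a vmask helper materializes a mask as a full byte vector. A minimal sketch of the two conversions, assuming AVX-512BW (not the swr code itself):

// Illustrative sketch of mask <-> byte-vector conversions; assumes AVX-512BW.
#include <immintrin.h>

static __m512i mask_to_bytes(__mmask64 m)
{
    return _mm512_movm_epi8(m);       // bit i of m -> byte i = 0xFF or 0x00
}

static __mmask64 bytes_to_mask(__m512i v)
{
    return _mm512_movepi8_mask(v);    // mask bit i = sign (top) bit of byte i
}
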
/external/XNNPACK/src/qs8-gemm/
MRx16c8-avx512skx.c.in
186 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1)));
209 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1)));
230 const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1)));
/external/XNNPACK/src/qs8-igemm/
MRx16c8-avx512skx.c.in
193 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << (nc + ${16 * (MR - 1)})) - (UINT64_C(…
215 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << (nc + 16)) - (UINT32_C(1) << 16)));
235 const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1)));
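
In these GEMM/IGEMM templates one __mmask64 holds the column tail masks for several 16-byte output rows at once: row r uses bits 16*r .. 16*r + nc - 1, which is where expressions such as (1 << (nc + 16)) - (1 << 16) come from. A hedged sketch of that bit layout (illustrative, not template code):

// Illustrative sketch: per-row tail mask for the 16-column AVX512SKX kernels.
// Row `row` owns mask bits [16*row, 16*row + nc); nc = number of valid columns (1..16).
// Assumes AVX-512BW for _cvtu64_mask64 (or XNNPACK's polyfill).
#include <immintrin.h>
#include <stdint.h>

static __mmask64 row_tail_mask(unsigned row, unsigned nc)
{
    uint64_t row_bits = (UINT64_C(1) << nc) - UINT64_C(1);   // nc low bits set
    return _cvtu64_mask64(row_bits << (16u * row));          // shifted into the row's slot
}
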
/external/XNNPACK/src/qs8-gemm/gen/
1x16c8-minmax-avx512skx.c
124 const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1))); in xnn_qs8_gemm_minmax_ukernel_1x16c8__avx512skx()
2x16c8-minmax-avx512skx.c
159 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1))); in xnn_qs8_gemm_minmax_ukernel_2x16c8__avx512skx()
/external/XNNPACK/src/qs8-igemm/gen/
1x16c8-minmax-avx512skx.c
135 const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1))); in xnn_qs8_igemm_minmax_ukernel_1x16c8__avx512skx()
2x16c8-minmax-avx512skx.c
171 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << (nc + 16)) - (UINT32_C(1) << 16))); in xnn_qs8_igemm_minmax_ukernel_2x16c8__avx512skx()
3x16c8-minmax-avx512skx.c
207 __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << (nc + 32)) - (UINT64_C(1) << 32))); in xnn_qs8_igemm_minmax_ukernel_3x16c8__avx512skx()
