Home
last modified time | relevance | path

Searched refs:__v4sf (Results 1 – 11 of 11) sorted by relevance

/external/clang/lib/Headers/
avx512vldqintrin.h:131 return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, in _mm_mask_andnot_ps()
132 (__v4sf) __B, in _mm_mask_andnot_ps()
133 (__v4sf) __W, in _mm_mask_andnot_ps()
139 return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, in _mm_maskz_andnot_ps()
140 (__v4sf) __B, in _mm_maskz_andnot_ps()
141 (__v4sf) in _mm_maskz_andnot_ps()
199 return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, in _mm_mask_and_ps()
200 (__v4sf) __B, in _mm_mask_and_ps()
201 (__v4sf) __W, in _mm_mask_and_ps()
207 return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, in _mm_maskz_and_ps()
[all …]
avx512erintrin.h:129 (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
130 (__v4sf)(__m128)(B), \
131 (__v4sf)_mm_setzero_ps(), \
135 (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
136 (__v4sf)(__m128)(B), \
137 (__v4sf)(__m128)(S), \
141 (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
142 (__v4sf)(__m128)(B), \
143 (__v4sf)_mm_setzero_ps(), \
232 (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
[all …]
avx512vlintrin.h:1301 (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
1302 (__v4sf)(__m128)(b), \
1306 (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
1307 (__v4sf)(__m128)(b), \
1467 return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A, in _mm_mask_fmadd_ps()
1468 (__v4sf) __B, in _mm_mask_fmadd_ps()
1469 (__v4sf) __C, in _mm_mask_fmadd_ps()
1476 return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A, in _mm_mask3_fmadd_ps()
1477 (__v4sf) __B, in _mm_mask3_fmadd_ps()
1478 (__v4sf) __C, in _mm_mask3_fmadd_ps()
[all …]
avx512fintrin.h:572 return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, in _mm_mask_max_ss()
573 (__v4sf) __B, in _mm_mask_max_ss()
574 (__v4sf) __W, in _mm_mask_max_ss()
581 return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, in _mm_maskz_max_ss()
582 (__v4sf) __B, in _mm_maskz_max_ss()
583 (__v4sf) _mm_setzero_ps (), in _mm_maskz_max_ss()
589 (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
590 (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
593 (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
594 (__v4sf) __W, (__mmask8) __U,__R); })
[all …]
smmintrin.h:60 (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
63 (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
64 (__v4sf)(__m128)(Y), (M)); })
81 (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
97 return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2, in _mm_blendv_ps()
98 (__v4sf)__M); in _mm_blendv_ps()
135 (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
136 (__v4sf)(__m128)(Y), (M)); })
202 __v4sf __a = (__v4sf)(__m128)(X); \
208 #define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
avxintrin.h:259 return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c); in _mm_permutevar_ps()
281 (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
282 (__v4sf)_mm_setzero_ps(), \
408 (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
409 (__v4sf)(__m128)(b), (c)); })
424 (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
425 (__v4sf)(__m128)(b), (c)); })
519 return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a); in _mm256_cvtps_pd()
606 return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b); in _mm_testz_ps()
612 return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b); in _mm_testc_ps()
[all …]
avx2intrin.h:761 return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0); in _mm_broadcastss_ps()
773 return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0); in _mm256_broadcastss_ps()
1031 (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
1034 (__v4sf)(__m128)(mask), (s)); })
1043 (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
1046 (__v4sf)(__m128)(mask), (s)); })
1049 (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
1052 (__v4sf)(__m128)(mask), (s)); })
1137 (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
1140 (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
[all …]
f16cintrin.h:35 (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)); })
ammintrin.h:204 __builtin_ia32_movntss(__p, (__v4sf)__a); in _mm_stream_ss()
xopintrin.h:736 (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
747 return (__m128)__builtin_ia32_vfrczss((__v4sf)__A); in _mm_frcz_ss()
759 return (__m128)__builtin_ia32_vfrczps((__v4sf)__A); in _mm_frcz_ps()
xmmintrin.h:30 typedef float __v4sf __attribute__((__vector_size__(16))); typedef
796 (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \