
Searched refs:_mm512_cmp_ps_mask (Results 1 – 25 of 36) sorted by relevance
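For context, _mm512_cmp_ps_mask(a, b, imm) is the AVX-512F intrinsic that compares two __m512 vectors lane by lane under the predicate imm (e.g. _CMP_LT_OQ, _CMP_NLT_US) and returns a __mmask16 with one bit per lane. The recurring pattern in the XNNPACK PReLU/LReLU kernels listed below is to compare the input against zero and feed the resulting mask into a masked arithmetic op. A minimal sketch of that pattern follows; it is illustrative only and not taken from any of the files listed here (the helper name prelu16 is made up), and it assumes an AVX-512F-capable build (e.g. -mavx512f):

#include <immintrin.h>

/* Bit i of the mask is set when vx[i] < 0 (ordered, quiet), matching the
   _CMP_LT_OQ comparisons in the results below. */
static inline __m512 prelu16(__m512 vx, __m512 vslope) {
  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_LT_OQ);
  /* Negative lanes become vx * vslope; all other lanes keep vx. */
  return _mm512_mask_mul_ps(vx, vsign, vx, vslope);
}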

/external/llvm-project/clang/test/CodeGen/X86/
avx512f-builtins-constrained-cmp.c:20 return _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ); in test_mm512_cmp_ps_mask_eq_oq()
26 return _mm512_cmp_ps_mask(a, b, _CMP_LT_OS); in test_mm512_cmp_ps_mask_lt_os()
32 return _mm512_cmp_ps_mask(a, b, _CMP_LE_OS); in test_mm512_cmp_ps_mask_le_os()
38 return _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q); in test_mm512_cmp_ps_mask_unord_q()
44 return _mm512_cmp_ps_mask(a, b, _CMP_NEQ_UQ); in test_mm512_cmp_ps_mask_neq_uq()
50 return _mm512_cmp_ps_mask(a, b, _CMP_NLT_US); in test_mm512_cmp_ps_mask_nlt_us()
56 return _mm512_cmp_ps_mask(a, b, _CMP_NLE_US); in test_mm512_cmp_ps_mask_nle_us()
62 return _mm512_cmp_ps_mask(a, b, _CMP_ORD_Q); in test_mm512_cmp_ps_mask_ord_q()
68 return _mm512_cmp_ps_mask(a, b, _CMP_EQ_UQ); in test_mm512_cmp_ps_mask_eq_uq()
74 return _mm512_cmp_ps_mask(a, b, _CMP_NGE_US); in test_mm512_cmp_ps_mask_nge_us()
[all …]
/external/XNNPACK/src/f32-prelu/gen/
avx512f-2x32.c:60 …const __mmask16 vsign0x0123456789ABCDEF = _mm512_cmp_ps_mask(vi0x0123456789ABCDEF, vzero, _CMP_LT_… in xnn_f32_prelu_ukernel__avx512f_2x32()
62 …const __mmask16 vsign0xGHIJKLMNOPQRSTUV = _mm512_cmp_ps_mask(vi0xGHIJKLMNOPQRSTUV, vzero, _CMP_LT_… in xnn_f32_prelu_ukernel__avx512f_2x32()
64 …const __mmask16 vsign1x0123456789ABCDEF = _mm512_cmp_ps_mask(vi1x0123456789ABCDEF, vzero, _CMP_LT_… in xnn_f32_prelu_ukernel__avx512f_2x32()
66 …const __mmask16 vsign1xGHIJKLMNOPQRSTUV = _mm512_cmp_ps_mask(vi1xGHIJKLMNOPQRSTUV, vzero, _CMP_LT_… in xnn_f32_prelu_ukernel__avx512f_2x32()
85 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vi0, vzero, _CMP_LT_OQ); in xnn_f32_prelu_ukernel__avx512f_2x32()
87 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vi1, vzero, _CMP_LT_OQ); in xnn_f32_prelu_ukernel__avx512f_2x32()
108 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vi0, vzero, _CMP_LT_OQ); in xnn_f32_prelu_ukernel__avx512f_2x32()
110 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vi1, vzero, _CMP_LT_OQ); in xnn_f32_prelu_ukernel__avx512f_2x32()
avx512f-2x16.c:57 …const __mmask16 vsign0x0123456789ABCDEF = _mm512_cmp_ps_mask(vi0x0123456789ABCDEF, vzero, _CMP_LT_… in xnn_f32_prelu_ukernel__avx512f_2x16()
59 …const __mmask16 vsign1x0123456789ABCDEF = _mm512_cmp_ps_mask(vi1x0123456789ABCDEF, vzero, _CMP_LT_… in xnn_f32_prelu_ukernel__avx512f_2x16()
80 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vi0, vzero, _CMP_LT_OQ); in xnn_f32_prelu_ukernel__avx512f_2x16()
82 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vi1, vzero, _CMP_LT_OQ); in xnn_f32_prelu_ukernel__avx512f_2x16()
/external/XNNPACK/src/f32-vlrelu/gen/
vlrelu-avx512f-x32.c:36 …const __mmask16 vsign0123456789ABCDEF = _mm512_cmp_ps_mask(vacc0123456789ABCDEF, vzero, _CMP_LT_OQ… in xnn_f32_vlrelu_ukernel__avx512f_x32()
37 …const __mmask16 vsignGHIJKLMNOPQRSTUV = _mm512_cmp_ps_mask(vaccGHIJKLMNOPQRSTUV, vzero, _CMP_LT_OQ… in xnn_f32_vlrelu_ukernel__avx512f_x32()
49 const __mmask16 vsign = _mm512_cmp_ps_mask(vacc, vzero, _CMP_LT_OQ); in xnn_f32_vlrelu_ukernel__avx512f_x32()
/external/XNNPACK/src/f32-velu/gen/
velu-avx512f-rr1-lut16-p3-perm-x128.c:160 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
162 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
164 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
166 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
168 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
170 const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
172 const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
174 const __mmask16 vsign7 = _mm512_cmp_ps_mask(vx7, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
200 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
233 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128()
velu-avx512f-rr1-lut16-p3-perm-x112.c:147 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
149 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
151 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
153 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
155 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
157 const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
159 const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
183 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
216 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()
velu-avx512f-rr1-p6-x128.c:170 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
172 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
174 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
176 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
178 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
180 const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
182 const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
184 const __mmask16 vsign7 = _mm512_cmp_ps_mask(vx7, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
210 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
244 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x128()
velu-avx512f-rr1-lut16-p3-perm-x80.c:121 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
123 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
125 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
127 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
129 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
149 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
182 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()
velu-avx512f-rr1-lut16-p3-perm-x96.c:134 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
136 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
138 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
140 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
142 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
144 const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
166 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
199 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()
velu-avx512f-rr1-p6-x96.c:142 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
144 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
146 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
148 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
150 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
152 const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
174 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
208 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()
velu-avx512f-rr1-p6-x112.c:156 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
158 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
160 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
162 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
164 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
166 const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
168 const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
192 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
226 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()
velu-avx512f-rr1-p6-x80.c:128 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
130 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
132 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
134 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
136 const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
156 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
190 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
velu-avx512f-rr1-p6-x64.c:114 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
116 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
118 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
120 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
138 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
172 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
velu-avx512f-rr1-lut16-p3-perm-x64.c:108 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
110 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
112 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
114 const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
132 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
165 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
velu-avx512f-rr1-lut16-p3-perm-x48.c:95 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
97 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
99 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
115 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
148 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
velu-avx512f-rr1-p6-x48.c:100 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
102 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
104 const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
120 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
154 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
velu-avx512f-rr1-lut16-p3-perm-x32.c:82 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
84 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
98 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
131 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
velu-avx512f-rr1-p6-x32.c:86 const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
88 const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
102 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
136 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
velu-avx512f-rr1-lut16-p3-perm-x16.c:47 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
80 const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
/external/eigen/Eigen/src/Core/arch/AVX512/
MathFunctions.h:68 _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);
70 _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ);
90 __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);
268 __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);
335 __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
340 __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
/external/XNNPACK/src/math/
exp-avx512f-rr2-p5-scalef.c:49 const __mmask16 vinvof = _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_UQ); in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
50 const __mmask16 vinvuf = _mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_UQ); in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
exp-avx512f-rr2-lut32-p2-perm2.c:61 const __mmask16 vinvof = _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_UQ); in xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2()
62 const __mmask16 vinvuf = _mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_UQ); in xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2()
exp-avx512f-rr2-p5.c:88 … __m512 vf = _mm512_maskz_fmadd_ps(_mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_US), vt, vp, vso); in xnn_math_f32_exp__avx512f_rr2_p5()
91 vf = _mm512_mask_mul_ps(vplus_inf, _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_US), vsn, vf); in xnn_math_f32_exp__avx512f_rr2_p5()
exp-avx512f-rr2-lut16-p3-perm.c:56 const __mmask16 vinvof = _mm512_cmp_ps_mask(vx, vinf_cutoff, _CMP_NGT_UQ); in xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm()
57 const __mmask16 vinvuf = _mm512_cmp_ps_mask(vx, vzero_cutoff, _CMP_NLT_UQ); in xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm()
/external/XNNPACK/src/f32-vlrelu/
avx512f.c.in:37 … const __mmask16 vsign${ABC[N:N+16]} = _mm512_cmp_ps_mask(vacc${ABC[N:N+16]}, vzero, _CMP_LT_OQ);
51 const __mmask16 vsign = _mm512_cmp_ps_mask(vacc, vzero, _CMP_LT_OQ);