/external/eigen/Eigen/src/Core/arch/AVX512/ |
D | MathFunctions.h |
     68  _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);
     70  _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ);
     91  Packet16f tmp = _mm512_mask_blend_ps(mask, x, _mm512_setzero_ps());
     93  e = psub(e, _mm512_mask_blend_ps(mask, p16f_1, _mm512_setzero_ps()));
    270  _mm512_setzero_ps());
    336  Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(),
    340  __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
    343  _mm512_mask_blend_ps(le_zero_mask, p16f_inf, _mm512_setzero_ps()));
|
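The Eigen matches above all follow one pattern: compare the input against zero to build a __mmask16, then use _mm512_mask_blend_ps to substitute a safe value in the flagged lanes before or after the core computation. A minimal sketch of that pattern, with a hypothetical name (clamp_nonpositive is illustrative, not Eigen code):

    #include <immintrin.h>

    // Sketch of the compare-then-blend idiom in the Eigen matches above:
    // lanes where x <= 0 are replaced with a safe value before a
    // log-style computation; the real code patches them back afterwards.
    static __m512 clamp_nonpositive(__m512 x) {
      // Mask bit is 1 for lanes where x <= 0 (ordered compare).
      const __mmask16 le_zero =
          _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_LE_OQ);
      // _mm512_mask_blend_ps(k, a, b) takes b where k is set, a elsewhere,
      // so nonpositive lanes come out as 1.0f here.
      return _mm512_mask_blend_ps(le_zero, x, _mm512_set1_ps(1.0f));
    }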
/external/llvm-project/clang/lib/Headers/ |
D | avx512erintrin.h |
     43  (__v16sf)_mm512_setzero_ps(), \
     53  (__v16sf)_mm512_setzero_ps(), \
     92  (__v16sf)_mm512_setzero_ps(), \
    102  (__v16sf)_mm512_setzero_ps(), \
    195  (__v16sf)_mm512_setzero_ps(), \
    205  (__v16sf)_mm512_setzero_ps(), \
|
D | avx512dqintrin.h |
    203  (__v16sf)_mm512_setzero_ps());  in _mm512_maskz_xor_ps()
    241  (__v16sf)_mm512_setzero_ps());  in _mm512_maskz_or_ps()
    279  (__v16sf)_mm512_setzero_ps());  in _mm512_maskz_and_ps()
    317  (__v16sf)_mm512_setzero_ps());  in _mm512_maskz_andnot_ps()
    823  (__v16sf)_mm512_setzero_ps(), \
    836  (__v16sf)_mm512_setzero_ps(), \
    843  (__v16sf)_mm512_setzero_ps(), \
    855  (__v16sf)_mm512_setzero_ps(), \
    932  (__v16sf)_mm512_setzero_ps(), \
    944  (__v16sf)_mm512_setzero_ps(), \
    [all …]
|
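In these headers, _mm512_setzero_ps() supplies the zero source for the maskz_* (zero-masking) intrinsics: result lanes whose mask bit is clear become 0.0f rather than passing a source operand through. A sketch of the documented user-level semantics (requires -mavx512dq; masked_xor_demo is a hypothetical wrapper):

    #include <immintrin.h>

    // Lanes of the result with a clear bit in k are zeroed; lanes with a
    // set bit hold a XOR b (bitwise, on the float representations).
    __m512 masked_xor_demo(__mmask16 k, __m512 a, __m512 b) {
      // By the documented semantics this is equivalent to:
      //   _mm512_mask_blend_ps(k, _mm512_setzero_ps(), _mm512_xor_ps(a, b))
      return _mm512_maskz_xor_ps(k, a, b);
    }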
/external/clang/lib/Headers/ |
D | avx512erintrin.h |
     57  (__v16sf)_mm512_setzero_ps(), \
     67  (__v16sf)_mm512_setzero_ps(), \
    106  (__v16sf)_mm512_setzero_ps(), \
    116  (__v16sf)_mm512_setzero_ps(), \
    209  (__v16sf)_mm512_setzero_ps(), \
    219  (__v16sf)_mm512_setzero_ps(), \
|
D | avx512dqintrin.h |
     96  _mm512_setzero_ps (),  in _mm512_maskz_xor_ps()
    140  _mm512_setzero_ps (),  in _mm512_maskz_or_ps()
    184  _mm512_setzero_ps (),  in _mm512_maskz_and_ps()
    219  _mm512_setzero_ps (),  in _mm512_andnot_ps()
    236  _mm512_setzero_ps (),  in _mm512_maskz_andnot_ps()
    753  (__v16sf)_mm512_setzero_ps(), \
    766  (__v16sf)_mm512_setzero_ps(), \
    773  (__v16sf)_mm512_setzero_ps(), \
    785  (__v16sf)_mm512_setzero_ps(), \
    862  (__v16sf)_mm512_setzero_ps(), \
    [all …]
|
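This older copy of the headers also routes the unmasked _mm512_andnot_ps through a zero-source select, which is why it shows up in the match list. A common caller-side use of the andnot family, clearing the sign bit to get |x| (an illustrative sketch, requiring -mavx512dq; abs_ps is not a header function):

    #include <immintrin.h>

    // ANDNOT with a sign-bit-only constant clears the sign bit per lane.
    static __m512 abs_ps(__m512 x) {
      const __m512 sign_mask = _mm512_set1_ps(-0.0f);  // only the sign bit set
      return _mm512_andnot_ps(sign_mask, x);           // x & ~signbit
    }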
/external/XNNPACK/src/f32-raddexpminusmax/gen/ |
D | avx512f-p5-scalef-x192-acc6.c |
     39  __m512 vacc0 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     40  __m512 vacc1 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     41  __m512 vacc2 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     42  __m512 vacc3 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     43  __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     44  __m512 vacc5 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
|
D | avx512f-p5-scalef-x160-acc5.c |
     39  __m512 vacc0 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     40  __m512 vacc1 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     41  __m512 vacc2 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     42  __m512 vacc3 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     43  __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
|
D | avx512f-p5-scalef-x128-acc4.c |
     39  __m512 vacc0 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
     40  __m512 vacc1 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
     41  __m512 vacc2 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
     42  __m512 vacc3 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
|
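The accN variants above initialize several independent accumulators to zero, fill them in a strided loop, and collapse them at the end; the multiple chains hide the latency of the adds. A minimal sketch of that idiom (radd_demo is a hypothetical name, and the real kernels accumulate exp(x - max) terms, not raw inputs):

    #include <immintrin.h>
    #include <stddef.h>

    // Two independent accumulator chains over 32 floats per iteration,
    // reduced to a scalar at the end.
    float radd_demo(const float* x, size_t n /* multiple of 32 */) {
      __m512 vacc0 = _mm512_setzero_ps();
      __m512 vacc1 = _mm512_setzero_ps();
      for (size_t i = 0; i < n; i += 32) {
        vacc0 = _mm512_add_ps(vacc0, _mm512_loadu_ps(x + i));
        vacc1 = _mm512_add_ps(vacc1, _mm512_loadu_ps(x + i + 16));
      }
      return _mm512_reduce_add_ps(_mm512_add_ps(vacc0, vacc1));
    }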
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | avx512f-p5-scalef-x192-acc6.c |
     40  __m512 vacc0 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     41  __m512 vacc1 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     42  __m512 vacc2 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     43  __m512 vacc3 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     44  __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
     45  __m512 vacc5 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()
|
D | avx512f-p5-scalef-x160-acc5.c |
     40  __m512 vacc0 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     41  __m512 vacc1 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     42  __m512 vacc2 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     43  __m512 vacc3 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
     44  __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()
|
D | avx512f-p5-scalef-x128-acc4.c |
     40  __m512 vacc0 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
     41  __m512 vacc1 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
     42  __m512 vacc2 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
     43  __m512 vacc3 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4()
|
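The p5_scalef naming in these kernels refers to reconstructing exp(x) as p * 2^n, where p comes from a degree-5 polynomial and n is an integer-valued float; _mm512_scalef_ps applies the 2^n factor in one instruction. A sketch of just that step (vp and vn stand in for values the real kernels compute earlier):

    #include <immintrin.h>

    // _mm512_scalef_ps multiplies each lane of vp by 2^floor(vn); since
    // vn is already integral in these kernels, that is exactly vp * 2^vn.
    static __m512 apply_exponent(__m512 vp, __m512 vn) {
      return _mm512_scalef_ps(vp, vn);
    }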
/external/XNNPACK/src/f32-velu/gen/ |
D | velu-avx512f-rr1-lut16-p3-perm-x32.c |
     80  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
     98  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
    131  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
|
D | velu-avx512f-rr1-p6-x32.c |
     84  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
    102  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
    136  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
|
D | velu-avx512f-rr1-lut16-p3-perm-x16.c |
     47  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
     80  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
|
D | velu-avx512f-rr1-lut16-p3-perm-x48.c |
     93  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
    115  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
    148  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
|
D | velu-avx512f-rr1-p6-x16.c |
     47  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x16()
     81  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x16()
|
D | velu-avx512f-rr1-p6-x48.c |
     98  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
    120  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
    154  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
|
D | velu-avx512f-rr1-p6-x64.c |
    112  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
    138  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
    172  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64()
|
D | velu-avx512f-rr1-lut16-p3-perm-x64.c |
    106  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
    132  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
    165  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
|
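Every velu kernel above uses the same branch selection: a _CMP_NLT_US compare against zero flags the non-negative lanes (NLT, i.e. x >= 0, with unordered lanes also set), and a masked operation then picks the linear branch there and the exponential branch elsewhere. A minimal sketch of that selection (elu_select is a hypothetical helper; ve and vx stand in for the two branch results the real kernels compute):

    #include <immintrin.h>

    static __m512 elu_select(__m512 vx, __m512 ve) {
      // Bit set where vx >= 0 (or vx is NaN, since the compare is unordered).
      const __mmask16 vsign =
          _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
      // Lanes with the bit set take vx (linear branch), the rest take ve
      // (the alpha*(exp(x)-1) branch).
      return _mm512_mask_blend_ps(vsign, ve, vx);
    }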
/external/XNNPACK/src/f32-raddextexp/gen/ |
D | avx512f-p5-scalef-x192-acc6.c |
     40  __m512 vaccv0 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6()
     41  __m512 vaccv1 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6()
     42  __m512 vaccv2 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6()
     43  __m512 vaccv3 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6()
     44  __m512 vaccv4 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6()
     45  __m512 vaccv5 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6()
|
D | avx512f-p5-scalef-x160-acc5.c |
     40  __m512 vaccv0 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5()
     41  __m512 vaccv1 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5()
     42  __m512 vaccv2 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5()
     43  __m512 vaccv3 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5()
     44  __m512 vaccv4 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5()
|
D | avx512f-p5-scalef-x128-acc4.c |
     40  __m512 vaccv0 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc4()
     41  __m512 vaccv1 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc4()
     42  __m512 vaccv2 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc4()
     43  __m512 vaccv3 = _mm512_setzero_ps();  in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc4()
|
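The vaccv accumulators in raddextexp are the value half of an extended-exponent pair: each accumulator carries a separate exponent vector so a long running sum of exp() terms cannot overflow float range. A simplified single-accumulator sketch of the update, under the assumption that vp and ve hold the mantissa-like part and exponent of the incoming term (acc_update is a hypothetical rendering, not the kernel's exact code):

    #include <immintrin.h>

    static void acc_update(__m512* vaccv, __m512* vacce,
                           __m512 vp, __m512 ve) {
      // Pick the larger exponent lane-wise as the common scale.
      const __m512 vmax_e = _mm512_max_ps(*vacce, ve);
      // Rescale both the accumulator and the new term onto that scale
      // with scalef (multiply by 2^delta), then add.
      const __m512 vaccs = _mm512_scalef_ps(*vaccv, _mm512_sub_ps(*vacce, vmax_e));
      const __m512 vs    = _mm512_scalef_ps(vp,     _mm512_sub_ps(ve,     vmax_e));
      *vaccv = _mm512_add_ps(vaccs, vs);
      *vacce = vmax_e;
    }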
/external/XNNPACK/src/f32-velu/ |
D | avx512f-rr1-lut16-p3-perm.c.in |
     78  const __m512 vzero = _mm512_setzero_ps();
     96  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
    129  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
|
D | avx512f-rr1-p6.c.in |
     83  const __m512 vzero = _mm512_setzero_ps();
    101  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
    135  const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
|
/external/XNNPACK/src/f32-relu/gen/ |
D | avx512f-x16.c |
     30  const __m512 vzero = _mm512_setzero_ps();  in xnn_f32_relu_ukernel__avx512f_x16()
|
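The ReLU kernel hoists the zero vector out of the loop and uses one max per 16-float block. A minimal sketch of that structure (relu_demo is a hypothetical wrapper; the real ukernel also handles tails shorter than 16 with a masked store):

    #include <immintrin.h>
    #include <stddef.h>

    void relu_demo(const float* x, float* y, size_t n /* multiple of 16 */) {
      const __m512 vzero = _mm512_setzero_ps();
      for (size_t i = 0; i < n; i += 16) {
        // max(0, x) clamps negative lanes to zero.
        _mm512_storeu_ps(y + i, _mm512_max_ps(vzero, _mm512_loadu_ps(x + i)));
      }
    }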