/external/XNNPACK/src/qs8-igemm/gen/ |
D | 3x8c8-minmax-avx2.c |
  in xnn_qs8_igemm_minmax_ukernel_3x8c8__avx2():
    137  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    138  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    139  const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    140  const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    141  const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23);
    142  const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67);
    144  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    145  const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
    146  const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657);
|
D | 2x8c8-minmax-avx2.c |
  in xnn_qs8_igemm_minmax_ukernel_2x8c8__avx2():
    118  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    119  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    120  const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    121  const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    123  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    124  const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
|
D | 1x8c8-minmax-avx2.c |
  in xnn_qs8_igemm_minmax_ukernel_1x8c8__avx2():
     99  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    100  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    102  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
|
/external/XNNPACK/src/qs8-gemm/gen/ |
D | 3x8c8-minmax-avx2.c |
  in xnn_qs8_gemm_minmax_ukernel_3x8c8__avx2():
    120  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    121  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    122  const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    123  const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    124  const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23);
    125  const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67);
    127  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    128  const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
    129  const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657);
|
D | 3x8c8-xw-minmax-avx2.c |
  in xnn_qs8_gemm_xw_minmax_ukernel_3x8c8__avx2():
    116  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    117  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    118  const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    119  const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    120  const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23);
    121  const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67);
    123  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    124  const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
    125  const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657);
|
D | 2x8c8-minmax-avx2.c |
  in xnn_qs8_gemm_minmax_ukernel_2x8c8__avx2():
    103  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    104  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    105  const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    106  const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    108  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    109  const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
|
D | 2x8c8-xw-minmax-avx2.c |
  in xnn_qs8_gemm_xw_minmax_ukernel_2x8c8__avx2():
     99  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    100  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    101  const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    102  const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    104  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    105  const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
|
D | 1x8c8-minmax-avx2.c |
  in xnn_qs8_gemm_minmax_ukernel_1x8c8__avx2():
     86  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
     87  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
     89  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
|
D | 1x8c8-xw-minmax-avx2.c |
  in xnn_qs8_gemm_xw_minmax_ukernel_1x8c8__avx2():
     82  const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
     83  const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
     85  const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
|
/external/libaom/libaom/aom_dsp/x86/ |
D | masked_sad_intrin_avx2.c |
  in masked_sad32xh_avx2():
     64  res = _mm256_hadd_epi32(res, res);
     65  res = _mm256_hadd_epi32(res, res);
  in masked_sad16xh_avx2():
    117  res = _mm256_hadd_epi32(res, res);
    118  res = _mm256_hadd_epi32(res, res);
  in highbd_masked_sad8xh_avx2():
    253  res = _mm256_hadd_epi32(res, res);
    254  res = _mm256_hadd_epi32(res, res);
  in highbd_masked_sad16xh_avx2():
    311  res = _mm256_hadd_epi32(res, res);
    312  res = _mm256_hadd_epi32(res, res);
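
In each of these SAD routines the two back-to-back _mm256_hadd_epi32 calls collapse every 128-bit lane of res to a single partial sum; a cross-lane add is still needed to finish the reduction. A minimal, self-contained sketch of that full horizontal sum follows; the helper name and the final lane-combining step are my own illustration, not libaom code.

  #include <immintrin.h>

  /* Hypothetical helper (not from libaom): horizontal sum of the eight
   * int32 elements of v. Two _mm256_hadd_epi32 calls leave each 128-bit
   * lane holding four copies of that lane's sum; the last step adds the
   * two lanes together. */
  static int hsum_epi32_avx2(__m256i v) {
    v = _mm256_hadd_epi32(v, v);                        /* per-lane pair sums */
    v = _mm256_hadd_epi32(v, v);                        /* per-lane totals    */
    const __m128i lo = _mm256_castsi256_si128(v);       /* low-lane total     */
    const __m128i hi = _mm256_extracti128_si256(v, 1);  /* high-lane total    */
    return _mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
  }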
|
D | obmc_variance_avx2.c |
  in obmc_variance_w16n():
    145  v_d = _mm256_hadd_epi32(v_sum_d, v_sse_d);
    146  v_d = _mm256_hadd_epi32(v_d, v_d);
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | sad4d_avx2.c |
  in calc_final_4():
     16  const __m256i t0 = _mm256_hadd_epi32(sums[0], sums[1]);
     17  const __m256i t1 = _mm256_hadd_epi32(sums[2], sums[3]);
     18  const __m256i t2 = _mm256_hadd_epi32(t0, t1);
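
calc_final_4() uses the same intrinsic to reduce four accumulator vectors at once: after the three hadd calls shown, element i of each 128-bit lane of t2 holds that lane's total for sums[i]. A hedged sketch of the whole reduction is below; the helper name and the final lane add are my assumption of how the four scalar sums are produced, not a copy of the libvpx function.

  #include <immintrin.h>

  /* Hypothetical helper (modeled on, but not copied from, calc_final_4):
   * reduce four __m256i int32 accumulators to their four totals. */
  static __m128i sum4_epi32_avx2(const __m256i sums[4]) {
    const __m256i t0 = _mm256_hadd_epi32(sums[0], sums[1]);
    const __m256i t1 = _mm256_hadd_epi32(sums[2], sums[3]);
    /* Low lane of t2: low-lane totals of sums[0..3];
     * high lane of t2: high-lane totals of sums[0..3]. */
    const __m256i t2 = _mm256_hadd_epi32(t0, t1);
    /* Element i of the result is the full sum of sums[i]. */
    return _mm_add_epi32(_mm256_castsi256_si128(t2),
                         _mm256_extracti128_si256(t2, 1));
  }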
|
/external/libaom/libaom/av1/common/x86/ |
D | cfl_avx2.c |
  in fill_sum_epi32():
    384  a = _mm256_hadd_epi32(a, a);
    389  a = _mm256_hadd_epi32(a, a);
    392  return _mm256_hadd_epi32(a, a);
|
/external/XNNPACK/src/qs8-igemm/ |
D | MRx8c8-avx2.c.in |
    104  const __m256i vacc${M}x0213 = _mm256_hadd_epi32(vacc${M}x01, vacc${M}x23);
    105  const __m256i vacc${M}x4657 = _mm256_hadd_epi32(vacc${M}x45, vacc${M}x67);
    108  const __m256i vacc${M}x02461357 = _mm256_hadd_epi32(vacc${M}x0213, vacc${M}x4657);
|
/external/libaom/libaom/av1/encoder/x86/ |
D | rdopt_avx2.c |
  in av1_get_horver_correlation_full_avx2():
     87  const __m256i hadd_xy_xz = _mm256_hadd_epi32(xy_sum_32, xz_sum_32);
     94  const __m256i hadd_x_x2 = _mm256_hadd_epi32(x_sum_32, x2_sum_32);
|
/external/XNNPACK/src/qs8-gemm/ |
D | MRx8c8-avx2.c.in |
    105  const __m256i vacc${M}x0213 = _mm256_hadd_epi32(vacc${M}x01, vacc${M}x23);
    106  const __m256i vacc${M}x4657 = _mm256_hadd_epi32(vacc${M}x45, vacc${M}x67);
    109  const __m256i vacc${M}x02461357 = _mm256_hadd_epi32(vacc${M}x0213, vacc${M}x4657);
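
The XNNPACK kernels listed earlier are generated from this template and its qs8-igemm counterpart above. Per output row, four __m256i accumulators appear to each hold partial sums for a pair of columns (one column per 128-bit lane), and two rounds of _mm256_hadd_epi32 fold them into a single vector whose elements land in 0 2 4 6 1 3 5 7 column order, hence the vacc${M}x02461357 naming. A minimal sketch of that reduction for one row follows; the helper name, the lane-layout assumption, and the final reordering permute are my own reconstruction, not the generated kernel.

  #include <immintrin.h>

  /* Hypothetical helper (not XNNPACK code): given four accumulators in
   * which accNM keeps column N partials in the low 128-bit lane and
   * column M partials in the high lane, produce one int32 sum per column
   * in natural 0..7 order. */
  static __m256i reduce_row(__m256i acc01, __m256i acc23,
                            __m256i acc45, __m256i acc67) {
    /* Low lane -> columns 0,2; high lane -> columns 1,3. */
    const __m256i acc0213 = _mm256_hadd_epi32(acc01, acc23);
    /* Low lane -> columns 4,6; high lane -> columns 5,7. */
    const __m256i acc4657 = _mm256_hadd_epi32(acc45, acc67);
    /* Columns now ordered 0 2 4 6 | 1 3 5 7 across the vector. */
    const __m256i acc02461357 = _mm256_hadd_epi32(acc0213, acc4657);
    /* Cross-lane permute back to 0 1 2 3 4 5 6 7. */
    const __m256i order = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
    return _mm256_permutevar8x32_epi32(acc02461357, order);
  }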
|
/external/libgav1/libgav1/src/dsp/x86/ |
D | cdef_avx2.cc |
  in SumVectorPair_S32():
    353  a = _mm256_hadd_epi32(a, a);
|
/external/clang/test/CodeGen/ |
D | avx2-builtins.c |
  in test_mm256_hadd_epi32():
    399  return _mm256_hadd_epi32(a, b);
|
/external/llvm-project/clang/test/CodeGen/X86/ |
D | avx2-builtins.c |
  in test_mm256_hadd_epi32():
    405  return _mm256_hadd_epi32(a, b);
|
/external/llvm-project/clang/lib/Headers/ |
D | avx2intrin.h |
  definition of _mm256_hadd_epi32():
    212  _mm256_hadd_epi32(__m256i __a, __m256i __b)
|
/external/clang/lib/Headers/ |
D | avx2intrin.h |
  definition of _mm256_hadd_epi32():
    241  _mm256_hadd_epi32(__m256i __a, __m256i __b)
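
For reference, the intrinsic defined here adds adjacent element pairs within each 128-bit lane and never mixes lanes. A small standalone example of its element-level behavior (values and program are mine, not from the header or its tests); build with AVX2 enabled, e.g. -mavx2:

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    const __m256i a = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
    const __m256i b = _mm256_setr_epi32(10, 11, 12, 13, 14, 15, 16, 17);
    /* Per 128-bit lane the result is {a0+a1, a2+a3, b0+b1, b2+b3},
     * so r = {1, 5, 21, 25, 9, 13, 29, 33} -- the lanes do not mix. */
    const __m256i r = _mm256_hadd_epi32(a, b);
    int out[8];
    _mm256_storeu_si256((__m256i *)out, r);
    for (int i = 0; i < 8; i++) printf("%d ", out[i]);
    printf("\n");
    return 0;
  }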
|