/external/libvpx/libvpx/vpx_dsp/x86/ |
D | vpx_subpixel_8t_intrin_avx2.c |
      66  srcReg = _mm256_inserti128_si256(  in vpx_filter_block1d16_h8_x_avx2()
      82  srcReg = _mm256_inserti128_si256(  in vpx_filter_block1d16_h8_x_avx2()
     211  s32b[0] = _mm256_inserti128_si256(_mm256_castsi128_si256(s[0]), s[1], 1);  in vpx_filter_block1d16_v8_x_avx2()
     212  s32b[1] = _mm256_inserti128_si256(_mm256_castsi128_si256(s[1]), s[2], 1);  in vpx_filter_block1d16_v8_x_avx2()
     213  s32b[2] = _mm256_inserti128_si256(_mm256_castsi128_si256(s[2]), s[3], 1);  in vpx_filter_block1d16_v8_x_avx2()
     214  s32b[3] = _mm256_inserti128_si256(_mm256_castsi128_si256(s[3]), s[4], 1);  in vpx_filter_block1d16_v8_x_avx2()
     215  s32b[4] = _mm256_inserti128_si256(_mm256_castsi128_si256(s[4]), s[5], 1);  in vpx_filter_block1d16_v8_x_avx2()
     216  s32b[5] = _mm256_inserti128_si256(_mm256_castsi128_si256(s[5]),  in vpx_filter_block1d16_v8_x_avx2()
     237  srcRegHead1 = _mm256_inserti128_si256(  in vpx_filter_block1d16_v8_x_avx2()
     241  srcRegHead2 = _mm256_inserti128_si256(  in vpx_filter_block1d16_v8_x_avx2()
     [all …]
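Note: the cast-then-insert idiom at hits 211-215 builds one 256-bit register out of two adjacent 16-byte source rows, so the vertical 8-tap filter can run two rows per multiply. A minimal sketch of the idiom (hypothetical names, not the libvpx code itself):

    #include <immintrin.h>

    /* row0 lands in bits 127:0, row1 in bits 255:128.  The cast costs
     * no instruction; the insert compiles to one vinserti128. */
    static __m256i pack_row_pair(__m128i row0, __m128i row1) {
      return _mm256_inserti128_si256(_mm256_castsi128_si256(row0), row1, 1);
    }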
|
D | convolve_avx2.h |
     106  return _mm256_inserti128_si256(tmp, _mm_loadu_si128((const __m128i *)hi), 1);  in mm256_loadu2_si128()
     112  return _mm256_inserti128_si256(tmp, _mm_loadl_epi64((const __m128i *)hi), 1);  in mm256_loadu2_epi64()
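Hits 106/112 are the return statements of two small load helpers. A plausible reconstruction, assuming `lo`/`hi` parameter names for the two source addresses:

    #include <immintrin.h>

    /* Load 16 bytes from two unrelated addresses into the two lanes
     * of one 256-bit register. */
    static inline __m256i loadu2_si128(const void *lo, const void *hi) {
      const __m256i tmp =
          _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)lo));
      return _mm256_inserti128_si256(tmp, _mm_loadu_si128((const __m128i *)hi), 1);
    }

    /* Same idea with 8-byte loads: only bits 63:0 of each lane are defined. */
    static inline __m256i loadu2_epi64(const void *lo, const void *hi) {
      const __m256i tmp =
          _mm256_castsi128_si256(_mm_loadl_epi64((const __m128i *)lo));
      return _mm256_inserti128_si256(tmp, _mm_loadl_epi64((const __m128i *)hi), 1);
    }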
|
D | highbd_convolve_avx2.c |
     523  s0 = _mm256_inserti128_si256(s0, _mm256_castsi256_si128(s1), 1);  in pack_8x9_init()
     524  s1 = _mm256_inserti128_si256(s1, _mm256_castsi256_si128(s2), 1);  in pack_8x9_init()
     525  s2 = _mm256_inserti128_si256(s2, _mm256_castsi256_si128(s3), 1);  in pack_8x9_init()
     526  s3 = _mm256_inserti128_si256(s3, _mm256_castsi256_si128(s4), 1);  in pack_8x9_init()
     527  s4 = _mm256_inserti128_si256(s4, _mm256_castsi256_si128(s5), 1);  in pack_8x9_init()
     528  s5 = _mm256_inserti128_si256(s5, _mm256_castsi256_si128(s6), 1);  in pack_8x9_init()
     547  __m256i s2 = _mm256_inserti128_si256(sig[8], _mm256_castsi256_si128(s0), 1);  in pack_8x9_pixels()
     548  __m256i s3 = _mm256_inserti128_si256(s0, _mm256_castsi256_si128(s1), 1);  in pack_8x9_pixels()
    1308  src_reg_12 = _mm256_inserti128_si256(src_reg_1,  in vpx_highbd_filter_block1d4_v4_avx2()
    1314  src_reg_23 = _mm256_inserti128_si256(src_reg_2,  in vpx_highbd_filter_block1d4_v4_avx2()
    [all …]
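Hits 523-548 run the idiom in the other direction: the low lane of the next register overwrites the high lane of the current one, which is how pack_8x9_init() pairs rows (i, i+1) for the vertical filter without reloading from memory. Sketch (hypothetical names):

    #include <immintrin.h>

    /* `cur` carries row i in its low lane, `next` carries row i+1 in
     * its low lane; the result holds the (i, i+1) pair in one register. */
    static __m256i pair_rows(__m256i cur, __m256i next) {
      return _mm256_inserti128_si256(cur, _mm256_castsi256_si128(next), 1);
    }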
|
D | variance_avx2.c |
     108  const __m256i s = _mm256_inserti128_si256(_mm256_castsi128_si256(s0), s1, 1);  in variance16_kernel_avx2()
     109  const __m256i r = _mm256_inserti128_si256(_mm256_castsi128_si256(r0), r1, 1);  in variance16_kernel_avx2()
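Hits 108/109 fuse two source rows and the two matching reference rows so one 256-bit step of the variance kernel covers two rows. A sketch of the loads that would feed them (names and parameters assumed):

    #include <immintrin.h>
    #include <stdint.h>

    /* Load rows i and i+1 of both source and reference as lane pairs. */
    static void load_row_pairs(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               __m256i *s, __m256i *r) {
      const __m128i s0 = _mm_loadu_si128((const __m128i *)src);
      const __m128i s1 = _mm_loadu_si128((const __m128i *)(src + src_stride));
      const __m128i r0 = _mm_loadu_si128((const __m128i *)ref);
      const __m128i r1 = _mm_loadu_si128((const __m128i *)(ref + ref_stride));
      *s = _mm256_inserti128_si256(_mm256_castsi128_si256(s0), s1, 1);
      *r = _mm256_inserti128_si256(_mm256_castsi128_si256(r0), r1, 1);
    }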
|
/external/libaom/libaom/aom_dsp/x86/ |
D | highbd_convolve_avx2.c |
     592  s[0] = _mm256_inserti128_si256(row0, _mm256_castsi256_si128(row1), 1);  in aom_highbd_filter_block1d4_h4_avx2()
     619  _mm256_inserti128_si256(row0_0, _mm256_castsi256_si128(row0_1), 1);  in aom_highbd_filter_block1d4_h4_avx2()
     668  _mm256_inserti128_si256(row0, _mm256_castsi256_si128(row1), 1);  in aom_highbd_filter_block1d8_h4_avx2()
     707  _mm256_inserti128_si256(row0_0, _mm256_castsi256_si128(row0_1), 1);  in aom_highbd_filter_block1d8_h4_avx2()
     872  s0 = _mm256_inserti128_si256(s0, _mm256_castsi256_si128(s1), 1);  in pack_8x9_init()
     873  s1 = _mm256_inserti128_si256(s1, _mm256_castsi256_si128(s2), 1);  in pack_8x9_init()
     874  s2 = _mm256_inserti128_si256(s2, _mm256_castsi256_si128(s3), 1);  in pack_8x9_init()
     875  s3 = _mm256_inserti128_si256(s3, _mm256_castsi256_si128(s4), 1);  in pack_8x9_init()
     876  s4 = _mm256_inserti128_si256(s4, _mm256_castsi256_si128(s5), 1);  in pack_8x9_init()
     877  s5 = _mm256_inserti128_si256(s5, _mm256_castsi256_si128(s6), 1);  in pack_8x9_init()
    [all …]
|
D | aom_subpixel_8t_intrin_avx2.c |
      53  a = _mm256_inserti128_si256(a, _mm_loadl_epi64((const __m128i *)(hi)), 1);  in xx_loadu2_epi64()
      66  a = _mm256_inserti128_si256(a, _mm_loadu_si128((const __m128i *)(hi)), 1);  in xx_loadu2_mi128()
     848  _mm256_inserti128_si256(srcReg4x, _mm256_castsi256_si128(srcReg5x), 1);  in aom_filter_block1d8_v4_avx2()
     853  _mm256_inserti128_si256(srcReg5x, _mm256_castsi256_si128(srcReg6x), 1);  in aom_filter_block1d8_v4_avx2()
     947  srcReg32b7 = _mm256_inserti128_si256(srcReg32b7,  in aom_filter_block1d8_v8_avx2()
     951  srcReg32b8 = _mm256_inserti128_si256(srcReg32b8,  in aom_filter_block1d8_v8_avx2()
    1087  _mm256_inserti128_si256(srcReg4x, _mm256_castsi256_si128(srcReg5x), 1);  in aom_filter_block1d16_v4_avx2()
    1092  _mm256_inserti128_si256(srcReg5x, _mm256_castsi256_si128(srcReg6x), 1);  in aom_filter_block1d16_v4_avx2()
    1202  srcReg32b7 = _mm256_inserti128_si256(srcReg32b7,  in aom_filter_block1d16_v8_avx2()
    1206  srcReg32b8 = _mm256_inserti128_si256(srcReg32b8,  in aom_filter_block1d16_v8_avx2()
    [all …]
|
D | highbd_variance_avx2.c |
      33  v_p_a = _mm256_inserti128_si256(v_p_a, v_p_a1, 1);  in aom_highbd_calc8x8var_avx2()
      34  v_p_b = _mm256_inserti128_si256(v_p_b, v_p_b1, 1);  in aom_highbd_calc8x8var_avx2()
|
D | intrapred_avx2.c |
     517  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);  in aom_dc_left_predictor_32x16_avx2()
     574  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);  in aom_dc_left_predictor_64x16_avx2()
     702  return _mm256_inserti128_si256(_mm256_castsi128_si256(t0), t1, 1);  in get_top_vector()
     708  const __m256i l = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);  in aom_paeth_predictor_16x8_avx2()
     727  return _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);  in get_left_vector()
    1127  a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);  in highbd_dr_prediction_z1_8xN_internal_avx2()
    1128  a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);  in highbd_dr_prediction_z1_8xN_internal_avx2()
    1245  res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),  in highbd_dr_prediction_z1_16xN_internal_avx2()
    1346  res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),  in highbd_dr_prediction_z1_32xN_internal_avx2()
    1463  res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),  in highbd_dr_prediction_z1_64xN_avx2()
    [all …]
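Hits 517, 574, 708, and 727 show a third flavor: the same 128-bit vector is both cast into the low lane and inserted into the high lane, broadcasting it so one 256-bit op covers two 16-pixel rows of the predictor. Sketch (hypothetical name):

    #include <immintrin.h>

    /* Both lanes end up holding the same 16 bytes.  On AVX2 this is
     * equivalent to _mm256_broadcastsi128_si256(x). */
    static __m256i splat_128(__m128i x) {
      return _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
    }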
|
D | convolve_avx2.h |
      41  data = _mm256_inserti128_si256( \
     133  data = _mm256_inserti128_si256( \
|
D | variance_avx2.c |
     107  const __m256i s = _mm256_inserti128_si256(_mm256_castsi128_si256(s0), s1, 1);  in variance16_kernel_avx2()
     108  const __m256i r = _mm256_inserti128_si256(_mm256_castsi128_si256(r0), r1, 1);  in variance16_kernel_avx2()
|
D | masked_sad_intrin_avx2.c |
      74  return _mm256_inserti128_si256(a, a1, 1);  in xx_loadu2_m128i()
|
/external/clang/test/CodeGen/ |
D | avx2-builtins.c |
     646  return _mm256_inserti128_si256(a, b, 0);  in test0_mm256_inserti128_si256()
     653  return _mm256_inserti128_si256(a, b, 1);  in test1_mm256_inserti128_si256()
     661  return _mm256_inserti128_si256(a, b, 2);  in test2_mm256_inserti128_si256()
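These three CodeGen tests pin down the immediate operand: only bit 0 selects the destination lane, so the M = 2 case at hit 661 should lower to the same shuffle as M = 0 (inferred from the (M) & 1 masking in the header definition below). The two meaningful forms, as a sketch:

    #include <immintrin.h>

    __m256i insert_low(__m256i a, __m128i b) {
      return _mm256_inserti128_si256(a, b, 0); /* replaces bits 127:0 */
    }
    __m256i insert_high(__m256i a, __m128i b) {
      return _mm256_inserti128_si256(a, b, 1); /* replaces bits 255:128 */
    }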
|
/external/libaom/libaom/av1/common/x86/ |
D | convolve_avx2.c |
     341  const __m256i data = _mm256_inserti128_si256(  in av1_convolve_x_sr_avx2()
     413  const __m256i data = _mm256_inserti128_si256(  in av1_convolve_x_sr_avx2()
|
D | convolve_2d_avx2.c |
      82  data = _mm256_inserti128_si256(  in av1_convolve_2d_sr_avx2()
|
D | jnt_convolve_avx2.c |
     659  data = _mm256_inserti128_si256(  in av1_dist_wtd_convolve_2d_avx2()
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v256_intrinsics_x86.h |
      52  return _mm256_inserti128_si256(_mm256_castsi128_si256(b), a, 1);  in v256_from_v128()
     664  : _mm256_inserti128_si256( \
     673  : _mm256_inserti128_si256( \
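Note the argument order at hit 52: v256_from_v128 puts its first argument in the upper lane and its second in the lower lane. A sketch with the lanes spelled out (function name hypothetical):

    #include <immintrin.h>

    /* `lo` fills bits 127:0, `hi` fills bits 255:128. */
    static __m256i from_halves(__m128i hi, __m128i lo) {
      return _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1);
    }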
|
/external/libaom/libaom/av1/encoder/x86/ |
D | pickrst_avx2.c |
     127  const __m256i s0 = _mm256_inserti128_si256(  in acc_stat_highbd_avx2()
|
/external/clang/lib/Headers/ |
D | avx2intrin.h |
     974  #define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \  macro
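The header spells the intrinsic as a macro because the lane selector must be a compile-time constant that folds into the shuffle mask. A scalar reference model of the semantics (an assumption-laden sketch, not clang's actual definition):

    #include <immintrin.h>
    #include <string.h>

    /* Copy `a`, then overwrite the 16-byte lane selected by bit 0 of
     * the immediate with `b`.  In the real macro `imm` cannot be a
     * runtime value. */
    static __m256i inserti128_ref(__m256i a, __m128i b, int imm) {
      __m256i r = a;
      memcpy((unsigned char *)&r + ((imm & 1) ? 16 : 0), &b, sizeof(b));
      return r;
    }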
|