/external/libavc/encoder/x86/

D | ime_distortion_metrics_sse42.c |
    131  sad_val = _mm_add_epi64(res_r0, res_r1);   in ime_compute_sad_16x16_sse42()
    132  sad_val = _mm_add_epi64(sad_val, res_r2);  in ime_compute_sad_16x16_sse42()
    133  sad_val = _mm_add_epi64(sad_val, res_r3);  in ime_compute_sad_16x16_sse42()
    154  sad_val = _mm_add_epi64(sad_val, res_r0);  in ime_compute_sad_16x16_sse42()
    155  sad_val = _mm_add_epi64(sad_val, res_r1);  in ime_compute_sad_16x16_sse42()
    156  sad_val = _mm_add_epi64(sad_val, res_r2);  in ime_compute_sad_16x16_sse42()
    157  sad_val = _mm_add_epi64(sad_val, res_r3);  in ime_compute_sad_16x16_sse42()
    177  sad_val = _mm_add_epi64(sad_val, res_r0);  in ime_compute_sad_16x16_sse42()
    178  sad_val = _mm_add_epi64(sad_val, res_r1);  in ime_compute_sad_16x16_sse42()
    179  sad_val = _mm_add_epi64(sad_val, res_r2);  in ime_compute_sad_16x16_sse42()
    [all …]
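The hits above are the accumulation step of a 16x16 SAD: _mm_sad_epu8 leaves two 16-bit row sums in the 64-bit lanes, so the running total can be kept with plain _mm_add_epi64 and folded once at the end. A minimal sketch of that pattern with hypothetical names, not libavc's actual routine (which unrolls rows into res_r0..res_r3 as shown):

    #include <emmintrin.h> /* SSE2 */
    #include <stdint.h>

    static uint32_t sad_16x16_sketch(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride) {
      __m128i sad_val = _mm_setzero_si128();
      int i;
      for (i = 0; i < 16; i++) {
        const __m128i s = _mm_loadu_si128((const __m128i *)(src + i * src_stride));
        const __m128i r = _mm_loadu_si128((const __m128i *)(ref + i * ref_stride));
        /* _mm_sad_epu8 yields two partial sums, one per 64-bit lane. */
        sad_val = _mm_add_epi64(sad_val, _mm_sad_epu8(s, r));
      }
      /* Fold the upper lane onto the lower one and extract. */
      sad_val = _mm_add_epi64(sad_val, _mm_srli_si128(sad_val, 8));
      return (uint32_t)_mm_cvtsi128_si32(sad_val);
    }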
/external/boringssl/src/crypto/poly1305/

D | poly1305_vec.c |
    255  T0 = _mm_add_epi64(T0, T5);  in poly1305_blocks()
    256  T1 = _mm_add_epi64(T1, T6);  in poly1305_blocks()
    259  T0 = _mm_add_epi64(T0, T5);  in poly1305_blocks()
    260  T1 = _mm_add_epi64(T1, T6);  in poly1305_blocks()
    263  T0 = _mm_add_epi64(T0, T5);  in poly1305_blocks()
    264  T1 = _mm_add_epi64(T1, T6);  in poly1305_blocks()
    267  T0 = _mm_add_epi64(T0, T5);  in poly1305_blocks()
    268  T1 = _mm_add_epi64(T1, T6);  in poly1305_blocks()
    271  T2 = _mm_add_epi64(T2, T5);  in poly1305_blocks()
    272  T3 = _mm_add_epi64(T3, T6);  in poly1305_blocks()
    [all …]
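T0..T6 above are 64-bit lanes holding partial products of Poly1305's radix-2^26 limbs. A sketch of one multiply-accumulate step under that limb representation (hypothetical names, not BoringSSL's actual code):

    #include <emmintrin.h> /* SSE2 */

    /* h and r each hold one zero-extended 26-bit limb per 64-bit lane.
     * Each _mm_mul_epu32 product is at most 52 bits, so thousands of
     * _mm_add_epi64 accumulations fit before a carry pass is needed. */
    static __m128i limb_mul_acc_sketch(__m128i T, __m128i h, __m128i r) {
      return _mm_add_epi64(T, _mm_mul_epu32(h, r));
    }

That headroom is the point of the unreduced adds at lines 255-272: carries can be propagated once per batch rather than per addition.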
/external/libvpx/libvpx/vp9/common/x86/

D | vp9_highbd_iht16x16_add_sse4.c |
    50  s0[0] = _mm_add_epi64(t00[0], t11[0]);  in highbd_iadst_butterfly_sse4_1()
    51  s0[1] = _mm_add_epi64(t00[1], t11[1]);  in highbd_iadst_butterfly_sse4_1()
    75  x0[0] = _mm_add_epi64(s0[0], s8[0]);    in highbd_iadst16_4col_sse4_1()
    76  x0[1] = _mm_add_epi64(s0[1], s8[1]);    in highbd_iadst16_4col_sse4_1()
    77  x1[0] = _mm_add_epi64(s1[0], s9[0]);    in highbd_iadst16_4col_sse4_1()
    78  x1[1] = _mm_add_epi64(s1[1], s9[1]);    in highbd_iadst16_4col_sse4_1()
    79  x2[0] = _mm_add_epi64(s2[0], s10[0]);   in highbd_iadst16_4col_sse4_1()
    80  x2[1] = _mm_add_epi64(s2[1], s10[1]);   in highbd_iadst16_4col_sse4_1()
    81  x3[0] = _mm_add_epi64(s3[0], s11[0]);   in highbd_iadst16_4col_sse4_1()
    82  x3[1] = _mm_add_epi64(s3[1], s11[1]);   in highbd_iadst16_4col_sse4_1()
    [all …]
D | vp9_highbd_iht4x4_add_sse4.c |
    50  t0[0] = _mm_add_epi64(s0[0], s3[0]);  in highbd_iadst4_sse4_1()
    51  t0[1] = _mm_add_epi64(s0[1], s3[1]);  in highbd_iadst4_sse4_1()
    52  t0[0] = _mm_add_epi64(t0[0], s5[0]);  in highbd_iadst4_sse4_1()
    53  t0[1] = _mm_add_epi64(t0[1], s5[1]);  in highbd_iadst4_sse4_1()
    64  s0[0] = _mm_add_epi64(t0[0], s2[0]);  in highbd_iadst4_sse4_1()
    65  s0[1] = _mm_add_epi64(t0[1], s2[1]);  in highbd_iadst4_sse4_1()
    66  s1[0] = _mm_add_epi64(t1[0], s2[0]);  in highbd_iadst4_sse4_1()
    67  s1[1] = _mm_add_epi64(t1[1], s2[1]);  in highbd_iadst4_sse4_1()
    68  s3[0] = _mm_add_epi64(t0[0], t1[0]);  in highbd_iadst4_sse4_1()
    69  s3[1] = _mm_add_epi64(t0[1], t1[1]);  in highbd_iadst4_sse4_1()
D | vp9_highbd_iht8x8_add_sse4.c |
    50  s0[0] = _mm_add_epi64(t00[0], t11[0]);  in highbd_iadst_butterfly_sse4_1()
    51  s0[1] = _mm_add_epi64(t00[1], t11[1]);  in highbd_iadst_butterfly_sse4_1()
    65  x0[0] = _mm_add_epi64(s0[0], s4[0]);    in highbd_iadst8_sse4_1()
    66  x0[1] = _mm_add_epi64(s0[1], s4[1]);    in highbd_iadst8_sse4_1()
    67  x1[0] = _mm_add_epi64(s1[0], s5[0]);    in highbd_iadst8_sse4_1()
    68  x1[1] = _mm_add_epi64(s1[1], s5[1]);    in highbd_iadst8_sse4_1()
    76  x2[0] = _mm_add_epi64(s2[0], s6[0]);    in highbd_iadst8_sse4_1()
    77  x2[1] = _mm_add_epi64(s2[1], s6[1]);    in highbd_iadst8_sse4_1()
    78  x3[0] = _mm_add_epi64(s3[0], s7[0]);    in highbd_iadst8_sse4_1()
    79  x3[1] = _mm_add_epi64(s3[1], s7[1]);    in highbd_iadst8_sse4_1()
    [all …]
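All three high-bitdepth inverse transforms above share the butterfly at line 50: each output is kept as a pair of __m128i (s0[0], s0[1]), four 64-bit lanes in total, one per 32-bit coefficient. A sketch of how such a butterfly widens and adds (in0/in1/cospi0/cospi1 are hypothetical names):

    #include <smmintrin.h> /* SSE4.1, for _mm_mul_epi32 */

    /* Widen four signed 32-bit lanes to 64-bit products, then add
     * lane-wise so no intermediate precision is lost. */
    static void iadst_butterfly_sketch(__m128i in0, __m128i in1,
                                       __m128i cospi0, __m128i cospi1,
                                       __m128i s0[2]) {
      /* Products of lanes 0 and 2 ... */
      const __m128i t00 = _mm_mul_epi32(in0, cospi0);
      const __m128i t11 = _mm_mul_epi32(in1, cospi1);
      /* ... and of lanes 1 and 3, shifted down first. */
      const __m128i t00_odd =
          _mm_mul_epi32(_mm_srli_si128(in0, 4), _mm_srli_si128(cospi0, 4));
      const __m128i t11_odd =
          _mm_mul_epi32(_mm_srli_si128(in1, 4), _mm_srli_si128(cospi1, 4));
      s0[0] = _mm_add_epi64(t00, t11);
      s0[1] = _mm_add_epi64(t00_odd, t11_odd);
    }

The 64-bit lanes matter because 10/12-bit video plus transform gain can overflow 32-bit intermediates.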
/external/flac/libFLAC/

D | lpc_intrin_sse41.c |
    97   xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    104  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    111  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    118  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    125  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    127  xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    157  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    164  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    171  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    178  xmm7 = _mm_add_epi64(xmm7, xmm6);  in FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41()
    [all …]
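The wide path keeps the 32x32->64 dot product of history samples and quantized LPC coefficients entirely in xmm7 and folds its two lanes at line 127. A sketch of the accumulate-and-fold shape (hypothetical helper; the real function is unrolled per predictor order, and x86-64 is assumed for _mm_cvtsi128_si64):

    #include <smmintrin.h> /* SSE4.1 */
    #include <stdint.h>

    /* Dot product of 32-bit samples and coefficients with a 64-bit
     * accumulator; order is assumed to be a multiple of 4. */
    static int64_t lpc_acc_fold_sketch(const int32_t *data,
                                       const int32_t *qlp, int order) {
      __m128i xmm7 = _mm_setzero_si128();
      int i;
      for (i = 0; i < order; i += 4) {
        const __m128i d = _mm_loadu_si128((const __m128i *)(data + i));
        const __m128i c = _mm_loadu_si128((const __m128i *)(qlp + i));
        /* Signed 32x32->64 products of lanes 0,2 and of lanes 1,3. */
        xmm7 = _mm_add_epi64(xmm7, _mm_mul_epi32(d, c));
        xmm7 = _mm_add_epi64(xmm7, _mm_mul_epi32(_mm_srli_si128(d, 4),
                                                 _mm_srli_si128(c, 4)));
      }
      /* Fold the two 64-bit lanes, as at line 127 above. */
      xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
      return _mm_cvtsi128_si64(xmm7);
    }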
D | stream_encoder_intrin_ssse3.c |
    109  mm_sum = _mm_add_epi64(mm_sum, mm_res);  in FLAC__precompute_partition_info_sums_intrin_ssse3()
    115  mm_sum = _mm_add_epi64(mm_sum, mm_res);  in FLAC__precompute_partition_info_sums_intrin_ssse3()
    120  mm_sum = _mm_add_epi64(mm_sum, mm_res);  in FLAC__precompute_partition_info_sums_intrin_ssse3()
    123  mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));  in FLAC__precompute_partition_info_sums_intrin_ssse3()
D | stream_encoder_intrin_avx2.c |
    107  sum128 = _mm_add_epi64(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));  in FLAC__precompute_partition_info_sums_intrin_avx2()
    112  sum128 = _mm_add_epi64(sum128, res128);  in FLAC__precompute_partition_info_sums_intrin_avx2()
    117  sum128 = _mm_add_epi64(sum128, res128);  in FLAC__precompute_partition_info_sums_intrin_avx2()
    120  sum128 = _mm_add_epi64(sum128, _mm_srli_si128(sum128, 8));  in FLAC__precompute_partition_info_sums_intrin_avx2()
D | stream_encoder_intrin_sse2.c |
    120  mm_sum = _mm_add_epi64(mm_sum, mm_res);  in FLAC__precompute_partition_info_sums_intrin_sse2()
    126  mm_sum = _mm_add_epi64(mm_sum, mm_res);  in FLAC__precompute_partition_info_sums_intrin_sse2()
    131  mm_sum = _mm_add_epi64(mm_sum, mm_res);  in FLAC__precompute_partition_info_sums_intrin_sse2()
    134  mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));  in FLAC__precompute_partition_info_sums_intrin_sse2()
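All three FLAC__precompute_partition_info_sums variants finish identically: the AVX2 one first folds its 256-bit accumulator to 128 bits (line 107), and every variant then adds the upper 64-bit lane onto the lower (lines 123/120/134). A sketch of that full reduction, assuming AVX2 and x86-64:

    #include <immintrin.h> /* AVX2 */
    #include <stdint.h>

    /* Reduce four 64-bit partial sums to one scalar: cross-lane fold
     * 256 -> 128, then fold the remaining two lanes. */
    static int64_t hsum_epi64_avx2_sketch(__m256i sum256) {
      __m128i sum128 = _mm_add_epi64(_mm256_extracti128_si256(sum256, 1),
                                     _mm256_castsi256_si128(sum256));
      sum128 = _mm_add_epi64(sum128, _mm_srli_si128(sum128, 8));
      return _mm_cvtsi128_si64(sum128);
    }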
/external/python/cpython3/Modules/_blake2/impl/

D | blake2b-round.h |
    35  : (-(c) == 63) ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_add_epi64((x), (x))) \
    47  row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \
    48  row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \
    56  row3l = _mm_add_epi64(row3l, row4l); \
    57  row3h = _mm_add_epi64(row3h, row4h); \
    66  row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \
    67  row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \
    75  row3l = _mm_add_epi64(row3l, row4l); \
    76  row3h = _mm_add_epi64(row3h, row4h); \
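Line 35 is the notable hit: for a rotation count of 63, a 64-bit rotate right is a rotate left by 1, and the left shift is computed as x + x, one cheap _mm_add_epi64 instead of a second shift. Expanded for that case:

    #include <emmintrin.h> /* SSE2 */

    /* rotr(x, 63) == rotl(x, 1) == (x >> 63) ^ (x << 1), per 64-bit lane.
     * The two terms have disjoint bits, so XOR and OR are equivalent. */
    static __m128i rotr63_sketch(__m128i x) {
      return _mm_xor_si128(_mm_srli_epi64(x, 63), _mm_add_epi64(x, x));
    }

The remaining hits (lines 47-76) are the ordinary additions of the BLAKE2b G function, with each 4-lane row split across a low and a high register.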
/external/libaom/libaom/aom_dsp/x86/

D | sum_squares_sse2.c |
    67   __m128i v_acc_64 = _mm_add_epi64(_mm_srli_epi64(v_acc_q, 32),  in aom_sum_squares_2d_i16_4xn_sse2()
    69   v_acc_64 = _mm_add_epi64(v_acc_64, _mm_srli_si128(v_acc_64, 8));  in aom_sum_squares_2d_i16_4xn_sse2()
    112  v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));  in aom_sum_squares_2d_i16_nxn_sse2()
    113  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));  in aom_sum_squares_2d_i16_nxn_sse2()
    119  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));  in aom_sum_squares_2d_i16_nxn_sse2()
    182  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_and_si128(v_sum_d, v_zext_mask_q));  in aom_sum_squares_i16_64n_sse2()
    183  v_acc1_q = _mm_add_epi64(v_acc1_q, _mm_srli_epi64(v_sum_d, 32));  in aom_sum_squares_i16_64n_sse2()
    188  v_acc0_q = _mm_add_epi64(v_acc0_q, v_acc1_q);  in aom_sum_squares_i16_64n_sse2()
    189  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));  in aom_sum_squares_i16_64n_sse2()
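Lines 112-113 (and 182-183) show the mask-and-shift way to widen four 32-bit lane sums into two 64-bit accumulator lanes: AND keeps the low half of each 64-bit lane zero-extended, and a 32-bit right shift delivers the high half. A sketch, valid here because the inputs are sums of squares and therefore non-negative:

    #include <emmintrin.h> /* SSE2 */

    /* Add the four 32-bit lane sums of v_acc_d into the two 64-bit
     * lanes of v_acc_q, zero-extending each 32-bit value exactly once. */
    static __m128i widen_add_sketch(__m128i v_acc_q, __m128i v_acc_d) {
      const __m128i v_zext_mask_q = _mm_set1_epi64x(0xffffffffLL);
      v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));
      v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));
      return v_acc_q;
    }

The vpx_dsp copy of this file further down the list uses exactly the same pattern.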
D | sse_sse4.c |
    25   const __m128i sum_2x64 = _mm_add_epi64(sum0, sum1);  in summary_all_sse4()
    26   const __m128i sum_1x64 = _mm_add_epi64(sum_2x64, _mm_srli_si128(sum_2x64, 8));  in summary_all_sse4()
    34   *sum64 = _mm_add_epi64(sum0, *sum64);  in summary_32_sse4()
    35   *sum64 = _mm_add_epi64(sum1, *sum64);  in summary_32_sse4()
    240  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
    258  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
    280  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
    310  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
    346  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
D | obmc_intrinsic_ssse3.h |
    26  v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));  in xx_hsum_epi64_si64()
    42  return xx_hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));  in xx_hsum_epi32_si64()
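This header names the fold idiom that recurs throughout this list. A sketch of the same shape (the real helper may fall back to a store on 32-bit targets; _mm_cvtsi128_si64 assumes x86-64):

    #include <emmintrin.h> /* SSE2 */
    #include <stdint.h>

    /* Horizontal sum of two 64-bit lanes: shift the high lane down by
     * eight bytes, add, read lane 0. */
    static int64_t xx_hsum_epi64_si64_sketch(__m128i v_q) {
      v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
      return _mm_cvtsi128_si64(v_q);
    }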
/external/pdfium/third_party/libopenjpeg20/

D | mct.c |
    248  lo = _mm_add_epi64(lo, mulround);  in opj_mct_encode_real()
    249  hi = _mm_add_epi64(hi, mulround);  in opj_mct_encode_real()
    258  lo = _mm_add_epi64(lo, mulround);  in opj_mct_encode_real()
    259  hi = _mm_add_epi64(hi, mulround);  in opj_mct_encode_real()
    268  lo = _mm_add_epi64(lo, mulround);  in opj_mct_encode_real()
    269  hi = _mm_add_epi64(hi, mulround);  in opj_mct_encode_real()
    283  lo = _mm_add_epi64(lo, mulround);  in opj_mct_encode_real()
    284  hi = _mm_add_epi64(hi, mulround);  in opj_mct_encode_real()
    293  lo = _mm_add_epi64(lo, mulround);  in opj_mct_encode_real()
    294  hi = _mm_add_epi64(hi, mulround);  in opj_mct_encode_real()
    [all …]
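mulround is the usual fixed-point rounding bias: adding 2^(s-1) before a right shift by s turns truncation into round-to-nearest. A sketch with an illustrative shift amount (MCT_SHIFT is hypothetical, not openjpeg's actual constant, and the logical shift assumes non-negative rounded products):

    #include <emmintrin.h> /* SSE2 */

    enum { MCT_SHIFT = 13 }; /* hypothetical */

    /* Round-to-nearest right shift of two 64-bit fixed-point products. */
    static __m128i round_shift_sketch(__m128i prod64) {
      const __m128i mulround = _mm_set1_epi64x(1LL << (MCT_SHIFT - 1));
      prod64 = _mm_add_epi64(prod64, mulround);
      return _mm_srli_epi64(prod64, MCT_SHIFT);
    }

The rescaler_sse2.c entry below uses the same rounder-then-shift construction.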
/external/webp/src/dsp/

D | rescaler_sse2.c |
    155  const __m128i E1 = _mm_add_epi64(D1, rounder);  in RescalerImportRowShrink_SSE2()
    156  const __m128i E2 = _mm_add_epi64(D2, rounder);  in RescalerImportRowShrink_SSE2()
    206  const __m128i C0 = _mm_add_epi64(B0, rounder);  in ProcessRow_SSE2()
    207  const __m128i C1 = _mm_add_epi64(B1, rounder);  in ProcessRow_SSE2()
    208  const __m128i C2 = _mm_add_epi64(B2, rounder);  in ProcessRow_SSE2()
    209  const __m128i C3 = _mm_add_epi64(B3, rounder);  in ProcessRow_SSE2()
    261  const __m128i C0 = _mm_add_epi64(A0, B0);  in RescalerExportRowExpand_SSE2()
    262  const __m128i C1 = _mm_add_epi64(A1, B1);  in RescalerExportRowExpand_SSE2()
    263  const __m128i C2 = _mm_add_epi64(A2, B2);  in RescalerExportRowExpand_SSE2()
    264  const __m128i C3 = _mm_add_epi64(A3, B3);  in RescalerExportRowExpand_SSE2()
    [all …]
/external/libopus/silk/fixed/x86/

D | vector_ops_FIX_sse4_1.c |
    71  acc1 = _mm_add_epi64( acc1, xmm_tempa );        in silk_inner_prod16_aligned_64_sse4_1()
    72  acc2 = _mm_add_epi64( acc2, inVec1_76543210 );  in silk_inner_prod16_aligned_64_sse4_1()
    75  acc1 = _mm_add_epi64( acc1, acc2 );             in silk_inner_prod16_aligned_64_sse4_1()
    79  acc1 = _mm_add_epi64( acc1, acc2 );             in silk_inner_prod16_aligned_64_sse4_1()
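acc1 and acc2 are 64-bit lane accumulators for a 16-bit inner product; keeping two of them hides the latency of the dependent adds before they merge at lines 75/79. A sketch of one widening step, assuming the 32-bit pair sums come from _mm_madd_epi16 (names hypothetical):

    #include <smmintrin.h> /* SSE4.1, for _mm_cvtepi32_epi64 */

    /* pmaddwd forms four signed 32-bit pair sums; sign-extend them in
     * two halves and accumulate in 64-bit lanes. */
    static __m128i inner_prod_step_sketch(__m128i acc, __m128i a, __m128i b) {
      const __m128i prod32 = _mm_madd_epi16(a, b);
      const __m128i lo64 = _mm_cvtepi32_epi64(prod32);
      const __m128i hi64 = _mm_cvtepi32_epi64(_mm_srli_si128(prod32, 8));
      return _mm_add_epi64(acc, _mm_add_epi64(lo64, hi64));
    }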
/external/libaom/libaom/av1/encoder/x86/

D | wedge_utils_sse2.c |
    74   const __m128i v_sum0_q = _mm_add_epi64(_mm_and_si128(v_sq0_d, v_zext_q),  in av1_wedge_sse_from_residuals_sse2()
    76   const __m128i v_sum1_q = _mm_add_epi64(_mm_and_si128(v_sq1_d, v_zext_q),  in av1_wedge_sse_from_residuals_sse2()
    79   v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum0_q);  in av1_wedge_sse_from_residuals_sse2()
    80   v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum1_q);  in av1_wedge_sse_from_residuals_sse2()
    86   v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));  in av1_wedge_sse_from_residuals_sse2()
    166  v_acc0_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc0_d, v_sign_d),  in av1_wedge_sign_from_residuals_sse2()
    170  v_acc1_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc1_d, v_sign_d),  in av1_wedge_sign_from_residuals_sse2()
    173  v_acc_q = _mm_add_epi64(v_acc0_d, v_acc1_d);  in av1_wedge_sign_from_residuals_sse2()
    175  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));  in av1_wedge_sign_from_residuals_sse2()
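Lines 166-173 use a different widening than the zero-extension seen above: these lane sums can be negative, so each 32-bit lane is interleaved with its own sign mask before the 64-bit add, a manual sign extension. A sketch (hypothetical helper):

    #include <emmintrin.h> /* SSE2 */

    /* Sign-extend four signed 32-bit lane sums to 64 bits and add them
     * pairwise into two 64-bit lanes. */
    static __m128i signext_pairs_sketch(__m128i v_acc_d) {
      const __m128i v_sign_d = _mm_cmplt_epi32(v_acc_d, _mm_setzero_si128());
      return _mm_add_epi64(_mm_unpacklo_epi32(v_acc_d, v_sign_d),
                           _mm_unpackhi_epi32(v_acc_d, v_sign_d));
    }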
D | pickrst_sse4.c |
    158  const __m128i rll = _mm_add_epi64(xx_loadu_128(dst), dll);      in acc_stat_highbd_sse41()
    160  const __m128i rlh = _mm_add_epi64(xx_loadu_128(dst + 2), dlh);  in acc_stat_highbd_sse41()
    162  const __m128i rhl = _mm_add_epi64(xx_loadu_128(dst + 4), dhl);  in acc_stat_highbd_sse41()
    164  const __m128i rhh = _mm_add_epi64(xx_loadu_128(dst + 6), dhh);  in acc_stat_highbd_sse41()
    550  sum64 = _mm_add_epi64(sum64, sum64_0);  in av1_lowbd_pixel_proj_error_sse4_1()
    551  sum64 = _mm_add_epi64(sum64, sum64_1);  in av1_lowbd_pixel_proj_error_sse4_1()
    588  sum64 = _mm_add_epi64(sum64, sum64_0);  in av1_lowbd_pixel_proj_error_sse4_1()
    589  sum64 = _mm_add_epi64(sum64, sum64_1);  in av1_lowbd_pixel_proj_error_sse4_1()
    617  sum64 = _mm_add_epi64(sum64_0, sum64_1);  in av1_lowbd_pixel_proj_error_sse4_1()
    703  sum64 = _mm_add_epi64(sum64, sum32l);  in av1_highbd_pixel_proj_error_sse4_1()
    [all …]
D | error_intrin_avx2.c |
    76  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),  in av1_block_error_avx2()
    79  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),  in av1_block_error_avx2()
/external/webrtc/webrtc/modules/video_processing/

D | content_analysis_sse2.cc |
    52   sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));  in TemporalDiffMetric_SSE2()
    55   sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));  in TemporalDiffMetric_SSE2()
    70   _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),  in TemporalDiffMetric_SSE2()
    234  _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),  in ComputeSpatialMetrics_SSE2()
    236  _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),  in ComputeSpatialMetrics_SSE2()
    238  _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),  in ComputeSpatialMetrics_SSE2()
    240  _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),  in ComputeSpatialMetrics_SSE2()
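Lines 70 and 234-240 show the unpack-with-zero flavor of widening: interleaving 32-bit lanes with a zero register zero-extends them into 64-bit lanes for the final _mm_add_epi64. A sketch, valid for the non-negative sums produced by _mm_sad_epu8 and squaring:

    #include <emmintrin.h> /* SSE2 */

    /* Zero-extend four 32-bit lane sums and fold them into a running
     * 64-bit accumulator. */
    static __m128i widen_unpack_sketch(__m128i sum64, __m128i sum32) {
      const __m128i z = _mm_setzero_si128();
      return _mm_add_epi64(sum64,
                           _mm_add_epi64(_mm_unpackhi_epi32(sum32, z),
                                         _mm_unpacklo_epi32(sum32, z)));
    }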
/external/lzma/C/

D | AesOpt.c |
    123  ctr = _mm_add_epi64(ctr, one); m0 = _mm_xor_si128(ctr, t);  in AesCtr_Code_Intel()
    124  ctr = _mm_add_epi64(ctr, one); m1 = _mm_xor_si128(ctr, t);  in AesCtr_Code_Intel()
    125  ctr = _mm_add_epi64(ctr, one); m2 = _mm_xor_si128(ctr, t);  in AesCtr_Code_Intel()
    146  ctr = _mm_add_epi64(ctr, one);  in AesCtr_Code_Intel()
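Here _mm_add_epi64 is not an accumulator but the CTR-mode counter bump: adding 1 to the low 64-bit lane of the counter block. Because the add is per-lane, it never carries into the high lane, presumably relying on the 64-bit block counter not wrapping in practice. A sketch of just that step (hypothetical names):

    #include <emmintrin.h> /* SSE2 */

    /* Advance the 128-bit CTR block; lane 0 is the low 64 bits. */
    static __m128i ctr_next_sketch(__m128i ctr) {
      const __m128i one = _mm_set_epi64x(0, 1);
      return _mm_add_epi64(ctr, one);
    }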
/external/libvpx/libvpx/vpx_dsp/x86/

D | sum_squares_sse2.c |
    86  v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));  in vpx_sum_squares_2d_i16_sse2()
    87  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));  in vpx_sum_squares_2d_i16_sse2()
    93  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));  in vpx_sum_squares_2d_i16_sse2()
/external/libmpeg2/common/x86/

D | ideint_spatial_filter_ssse3.c |
    145  diffs[0] = _mm_add_epi64(diffs[0], diff);  in ideint_spatial_filter_ssse3()
    148  diffs[1] = _mm_add_epi64(diffs[1], diff);  in ideint_spatial_filter_ssse3()
    151  diffs[2] = _mm_add_epi64(diffs[2], diff);  in ideint_spatial_filter_ssse3()
D | icv_sad_ssse3.c |
    139  res_r0 = _mm_add_epi64(res_r0, res_r1);  in icv_sad_8x4_ssse3()
    140  res_r0 = _mm_add_epi64(res_r0, _mm_srli_si128(res_r0, 8));  in icv_sad_8x4_ssse3()
/external/libvpx/libvpx/vp9/encoder/x86/

D | vp9_error_avx2.c |
    96   sse_128 = _mm_add_epi64(_mm256_castsi256_si128(sse_256),  in vp9_block_error_avx2()
    99   ssz_128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_256),  in vp9_block_error_avx2()
    155  sse_128 = _mm_add_epi64(_mm256_castsi256_si128(sse_256),  in vp9_block_error_fp_avx2()