/external/libhevc/common/x86/
  ihevc_itrans_recon_ssse3_intr.c
    191  m_temp_reg_20 = _mm_slli_epi32(m_temp_reg_1, 6);   in ihevc_itrans_recon_4x4_ttype1_ssse3()
    192  m_temp_reg_21 = _mm_slli_epi32(m_temp_reg_1, 3);   in ihevc_itrans_recon_4x4_ttype1_ssse3()
    193  m_temp_reg_22 = _mm_slli_epi32(m_temp_reg_1, 1);   in ihevc_itrans_recon_4x4_ttype1_ssse3()
    222  m_temp_reg_20 = _mm_slli_epi32(m_temp_reg_10, 5);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    223  m_temp_reg_21 = _mm_slli_epi32(m_temp_reg_10, 1);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    228  m_temp_reg_20 = _mm_slli_epi32(m_temp_reg_11, 6);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    229  m_temp_reg_21 = _mm_slli_epi32(m_temp_reg_11, 3);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    236  m_temp_reg_20 = _mm_slli_epi32(m_temp_reg_11, 5);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    237  m_temp_reg_21 = _mm_slli_epi32(m_temp_reg_11, 1);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    242  m_temp_reg_20 = _mm_slli_epi32(m_temp_reg_12, 6);  in ihevc_itrans_recon_4x4_ttype1_ssse3()
    [all …]
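These ihevc_itrans_recon_4x4_ttype1 matches replace multiplications by small inverse-transform constants with shift-add sequences; for example 74*x = (x << 6) + (x << 3) + (x << 1), which matches the 6/3/1 run at lines 191-193. A minimal sketch of that idea, assuming SSE2 and a hypothetical helper name:

    #include <emmintrin.h>

    /* Hypothetical helper: multiply each signed 32-bit lane by 74 (= 64 + 8 + 2)
     * with shifts and adds instead of a full multiply, mirroring the 6/3/1
     * shift counts seen above. */
    static inline __m128i mul_by_74_sketch(__m128i x)
    {
        __m128i t = _mm_add_epi32(_mm_slli_epi32(x, 6), _mm_slli_epi32(x, 3));
        return _mm_add_epi32(t, _mm_slli_epi32(x, 1));
    }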

  ihevc_deblk_ssse3_intr.c
    435  const2_8x16b = _mm_slli_epi32(const2_8x16b, 3);   in ihevc_deblk_luma_vert_ssse3()
    871  const2_8x16b = _mm_slli_epi32(const2_8x16b, 3);   in ihevc_deblk_luma_horz_ssse3()
    935  src_q2_8x16b = _mm_slli_epi32(src_q2_8x16b, 31);  in ihevc_deblk_luma_horz_ssse3()
    936  src_p2_8x16b = _mm_slli_epi32(src_p2_8x16b, 31);  in ihevc_deblk_luma_horz_ssse3()

/external/epid-sdk/ext/ipp/sources/ippcp/
  pcpsms4ecby8cn.h
     90  __m128i T = _mm_slli_epi32(x, 13);             in Ltag()
     92  T = _mm_xor_si128(T, _mm_slli_epi32 (x,23));   in Ltag()
    125  __m128i T = _mm_slli_epi32(x, 2);              in L()
    128  T = _mm_xor_si128(T, _mm_slli_epi32 (x,10));   in L()
    131  T = _mm_xor_si128(T, _mm_slli_epi32 (x,18));   in L()
    134  T = _mm_xor_si128(T, _mm_slli_epi32 (x,24));   in L()
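L() and Ltag() appear to be SM4's linear transforms; this search only catches the left-shift halves, and the matching _mm_srli_epi32 terms elsewhere in the file complete the 32-bit rotations. A minimal sketch of the round-function transform L(B) = B ^ (B <<< 2) ^ (B <<< 10) ^ (B <<< 18) ^ (B <<< 24), with the helper names being assumptions:

    #include <emmintrin.h>

    /* Rotate every 32-bit lane left by a constant n in 1..31. */
    #define ROTL32(x, n) \
        _mm_xor_si128(_mm_slli_epi32((x), (n)), _mm_srli_epi32((x), 32 - (n)))

    /* Sketch of SM4's L transform; the key-schedule variant uses the
     * rotation counts 13 and 23 seen in Ltag() above. */
    static inline __m128i sm4_L_sketch(__m128i x)
    {
        __m128i t = _mm_xor_si128(ROTL32(x, 2), ROTL32(x, 10));
        t = _mm_xor_si128(t, ROTL32(x, 18));
        t = _mm_xor_si128(t, ROTL32(x, 24));
        return _mm_xor_si128(x, t);
    }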

/external/libavc/common/x86/
  ih264_ihadamard_scaling_sse42.c
    181  src_r0 = _mm_slli_epi32(src_r0, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_sse42()
    182  src_r1 = _mm_slli_epi32(src_r1, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_sse42()
    183  src_r2 = _mm_slli_epi32(src_r2, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_sse42()
    184  src_r3 = _mm_slli_epi32(src_r3, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_sse42()
    237  temp0 = _mm_slli_epi32(temp0, u4_qp_div_6);        in ih264_ihadamard_scaling_2x2_uv_sse42()
    238  temp1 = _mm_slli_epi32(temp1, u4_qp_div_6);        in ih264_ihadamard_scaling_2x2_uv_sse42()

  ih264_iquant_itrans_recon_ssse3.c
    156  resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_ssse3()
    157  resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_ssse3()
    158  resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_ssse3()
    159  resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_ssse3()
    425  resq_r0_1 = _mm_slli_epi32(temp5, qp_div - 6);     in ih264_iquant_itrans_recon_8x8_ssse3()
    426  resq_r0_2 = _mm_slli_epi32(temp7, qp_div - 6);     in ih264_iquant_itrans_recon_8x8_ssse3()
    446  resq_r1_1 = _mm_slli_epi32(temp5, qp_div - 6);     in ih264_iquant_itrans_recon_8x8_ssse3()
    447  resq_r1_2 = _mm_slli_epi32(temp7, qp_div - 6);     in ih264_iquant_itrans_recon_8x8_ssse3()
    467  resq_r2_1 = _mm_slli_epi32(temp5, qp_div - 6);     in ih264_iquant_itrans_recon_8x8_ssse3()
    468  resq_r2_2 = _mm_slli_epi32(temp7, qp_div - 6);     in ih264_iquant_itrans_recon_8x8_ssse3()
    [all …]
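The shift counts u4_qp_div_6 - 4 (4x4) and qp_div - 6 (8x8) come from H.264's inverse-scaling rule, which only becomes a plain left shift once qp/6 is large enough; for smaller qp the scaled coefficient is rounded and shifted right instead. A minimal sketch of the 4x4 case under that assumption, not the library's actual code:

    #include <emmintrin.h>

    /* Sketch: finish dequantizing four 32-bit "level * scale" values.
     * qp/6 >= 4 is the left-shift path seen above; otherwise a rounded
     * arithmetic right shift by (4 - qp/6) is needed. */
    static inline __m128i dequant4x4_sketch(__m128i level_times_scale, int qp_div_6)
    {
        if (qp_div_6 >= 4)
            return _mm_slli_epi32(level_times_scale, qp_div_6 - 4);
        const int shift = 4 - qp_div_6;
        const __m128i rnd = _mm_set1_epi32(1 << (shift - 1));
        return _mm_srai_epi32(_mm_add_epi32(level_times_scale, rnd), shift);
    }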

  ih264_ihadamard_scaling_ssse3.c
    188  src_r0 = _mm_slli_epi32(src_r0, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_ssse3()
    189  src_r1 = _mm_slli_epi32(src_r1, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_ssse3()
    190  src_r2 = _mm_slli_epi32(src_r2, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_ssse3()
    191  src_r3 = _mm_slli_epi32(src_r3, u4_qp_div_6 - 6);  in ih264_ihadamard_scaling_4x4_ssse3()

  ih264_iquant_itrans_recon_sse42.c
    155  resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_sse42()
    156  resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_sse42()
    157  resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_sse42()
    158  resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_4x4_sse42()
    406  resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_chroma_4x4_sse42()
    407  resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_chroma_4x4_sse42()
    408  resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_chroma_4x4_sse42()
    409  resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);  in ih264_iquant_itrans_recon_chroma_4x4_sse42()

/external/scrypt/lib/crypto/
  crypto_scrypt-sse.c
     99  X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));   in salsa20_8()
    102  X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));   in salsa20_8()
    105  X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));  in salsa20_8()
    108  X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));  in salsa20_8()
    118  X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));   in salsa20_8()
    121  X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));   in salsa20_8()
    124  X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));  in salsa20_8()
    127  X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));  in salsa20_8()
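Salsa20 rotates by 7, 9, 13 and 18; in salsa20_8() each rotate is folded into two XORs, one with the left-shifted sum shown above and one with the complementary right shift (25, 23, 19, 14) that this search does not match. A minimal sketch of one such step, with the variable names assumed:

    #include <emmintrin.h>

    /* Sketch of one Salsa20 quarter-round update: x1 ^= rotl32(x0 + x3, 7),
     * written as two XORs so the left- and right-shift halves stay separate. */
    static inline __m128i salsa_step_sketch(__m128i x1, __m128i x0, __m128i x3)
    {
        __m128i t = _mm_add_epi32(x0, x3);
        x1 = _mm_xor_si128(x1, _mm_slli_epi32(t, 7));
        x1 = _mm_xor_si128(x1, _mm_srli_epi32(t, 25));  /* 7 + 25 = 32 completes the rotate */
        return x1;
    }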

/external/libaom/libaom/av1/common/x86/
  av1_txfm_sse4.h
     40  output[i] = _mm_slli_epi32(input[i], -bit);          in av1_round_shift_array_32_sse4_1()
     61  const __m128i r0 = _mm_slli_epi32(input[i], -bit);   in av1_round_shift_rect_array_32_sse4_1()
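The negative-looking shift count is deliberate: in these round-shift helpers a negative bit means "scale up", so the left shift is taken by -bit, while a positive bit takes a rounded right-shift path that this search does not hit. A minimal sketch of that convention, reconstructed rather than copied from the header:

    #include <emmintrin.h>

    /* Sketch: shift four 32-bit lanes by "bit" with rounding.
     * bit <= 0  -> left shift by -bit (no rounding needed)
     * bit  > 0  -> add half an LSB, then arithmetic right shift */
    static inline __m128i round_shift_32_sketch(__m128i v, int bit)
    {
        if (bit <= 0)
            return _mm_slli_epi32(v, -bit);
        const __m128i rounding = _mm_set1_epi32(1 << (bit - 1));
        return _mm_srai_epi32(_mm_add_epi32(v, rounding), bit);
    }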

  selfguided_sse4.c
    274  return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(fours, threes), 2), threes);  in cross_sum()
    413  _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),           in cross_sum_fast_even_row()
    443  _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),           in cross_sum_fast_odd_row()
    620  __m128i v_0 = _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS);                            in apply_selfguided_restoration_sse4_1()
    621  __m128i v_1 = _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS);                            in apply_selfguided_restoration_sse4_1()
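cross_sum() weights its neighbourhood sums by small constants without a multiply: ((fours + threes) << 2) - threes is 4*fours + 4*threes - threes = 4*fours + 3*threes, and the fast-row variants use (x << 2) + x for 5*x. A minimal sketch of the first identity:

    #include <emmintrin.h>

    /* Sketch: 4*a + 3*b via one shift, one add and one subtract,
     * the same shape as the cross_sum() return above. */
    static inline __m128i weight_4a_3b_sketch(__m128i a, __m128i b)
    {
        /* ((a + b) << 2) - b == 4a + 4b - b == 4a + 3b */
        return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a, b), 2), b);
    }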

/external/python/cpython3/Modules/_blake2/impl/
  blake2s-round.h
     34  : _mm_xor_si128(_mm_srli_epi32( (r), -(c) ),_mm_slli_epi32( (r), 32-(-(c)) )) )
     36  #define _mm_roti_epi32(r, c) _mm_xor_si128(_mm_srli_epi32( (r), -(c) ),_mm_slli_epi32( (r), 32-(-(c…
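This macro is BLAKE2s' generic 32-bit rotate: it is called with a negative count (hence the -(c)) and rotates right by |c|, pairing a right shift with the complementary left shift. XOR works here because the two shifted halves occupy disjoint bits, so OR would do equally well. A minimal sketch of the same operation with a positive count:

    #include <emmintrin.h>

    /* Sketch: rotate each 32-bit lane right by n (1..31). */
    static inline __m128i rotr32_sketch(__m128i r, int n)
    {
        return _mm_xor_si128(_mm_srli_epi32(r, n), _mm_slli_epi32(r, 32 - n));
    }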

/external/libopus/silk/x86/
  VQ_WMat_EC_sse4_1.c
     69  C_tmp2 = _mm_slli_epi32( C_tmp2, 7 );  in silk_VQ_WMat_EC_sse4_1()
     97  C_tmp5 = _mm_slli_epi32( C_tmp5, 1 );  in silk_VQ_WMat_EC_sse4_1()

/external/libvpx/libvpx/vpx_dsp/x86/
  post_proc_sse2.c
    102  mask_0 = _mm_slli_epi32(sumsq_0, 4);  in vpx_mbpost_proc_down_sse2()
    104  mask_1 = _mm_slli_epi32(sumsq_1, 4);  in vpx_mbpost_proc_down_sse2()

/external/eigen/Eigen/src/Core/arch/SSE/
  MathFunctions.h
    168  emm0 = _mm_slli_epi32(emm0, 23);
    240  emm0 = _mm_slli_epi32(emm0, 20);
    305  emm0 = _mm_slli_epi32(emm0, 29);
    400  emm0 = _mm_slli_epi32(emm0, 29);
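The shift by 23 places a biased integer exponent directly into the IEEE-754 single-precision exponent field, which is how a vectorized exp() materializes 2^n; the shift by 20 plays the same role for the high word of a double, and the shifts by 29 in the sin/cos kernels appear to move low quadrant bits up toward the sign bit to build sign-flip masks. A minimal sketch of the float case, assuming n stays within the normal exponent range:

    #include <emmintrin.h>

    /* Sketch: build the float value 2^n in every lane by writing n + 127
     * (the single-precision bias) into the exponent field, which starts
     * at bit 23. */
    static inline __m128 pow2n_ps_sketch(__m128i n)
    {
        __m128i biased = _mm_add_epi32(n, _mm_set1_epi32(127));
        return _mm_castsi128_ps(_mm_slli_epi32(biased, 23));
    }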

/external/libaom/libaom/av1/encoder/x86/
  highbd_fwd_txfm_sse4.c
     52  in[0] = _mm_slli_epi32(in[0], shift);  in load_buffer_4x4()
     53  in[1] = _mm_slli_epi32(in[1], shift);  in load_buffer_4x4()
     54  in[2] = _mm_slli_epi32(in[2], shift);  in load_buffer_4x4()
     55  in[3] = _mm_slli_epi32(in[3], shift);  in load_buffer_4x4()
    391  in[0] = _mm_slli_epi32(in[0], shift);  in load_buffer_8x8()
    392  in[1] = _mm_slli_epi32(in[1], shift);  in load_buffer_8x8()
    393  in[2] = _mm_slli_epi32(in[2], shift);  in load_buffer_8x8()
    394  in[3] = _mm_slli_epi32(in[3], shift);  in load_buffer_8x8()
    395  in[4] = _mm_slli_epi32(in[4], shift);  in load_buffer_8x8()
    396  in[5] = _mm_slli_epi32(in[5], shift);  in load_buffer_8x8()
    [all …]

  av1_highbd_quantize_sse4.c
     41  const __m128i abs_s = _mm_slli_epi32(*coeff, 1 + scale);  in quantize_coeff_phase1()

/external/jemalloc/test/include/test/
  SFMT-sse2.h
     70  v = _mm_slli_epi32(d, SL1);  in mm_recursion()

/external/jemalloc_new/test/include/test/
  SFMT-sse2.h
     70  v = _mm_slli_epi32(d, SL1);  in mm_recursion()

/external/eigen/Eigen/src/Core/arch/AVX/
  MathFunctions.h
     26  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);  in pshiftleft()
     27  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);  in pshiftleft()
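pshiftleft() works around the fact that 256-bit integer shifts only arrived with AVX2: the register is split into its two 128-bit halves, each half is shifted with the SSE2 intrinsic, and the halves are put back together. A minimal sketch of that pattern; the reassembly step is an assumption, since it is not part of the matched lines:

    #include <immintrin.h>

    /* Sketch: 32-bit left shift of a 256-bit register on an AVX1-only
     * target, done one 128-bit half at a time. */
    static inline __m256i pshiftleft_sketch(__m256i v, int n)
    {
        __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);
        __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);
        return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
    }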

/external/libopus/silk/fixed/x86/
  burg_modified_FIX_sse4_1.c
    227  xmm_x_ptr_n_k_x2x0 = _mm_slli_epi32( xmm_x_ptr_n_k_x2x0, -rshifts - 1 );  in silk_burg_modified_sse4_1()
    228  xmm_x_ptr_sub_x2x0 = _mm_slli_epi32( xmm_x_ptr_sub_x2x0, -rshifts - 1 );  in silk_burg_modified_sse4_1()

/external/mesa3d/src/gallium/drivers/llvmpipe/
  lp_rast_tri.c
    283  rej4 = _mm_slli_epi32(rej4, 2);                   in lp_rast_triangle_32_3_16()
    339  cx = _mm_add_epi32(cx, _mm_slli_epi32(dcdx, 2));  in lp_rast_triangle_32_3_16()
    342  c = _mm_add_epi32(c, _mm_slli_epi32(dcdy, 2));    in lp_rast_triangle_32_3_16()

/external/skqp/src/opts/
  SkBlitRow_opts.h
     20  __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);  in SkPMSrcOver_SSE2()

/external/skia/src/opts/
  SkBlitRow_opts.h
     20  __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);  in SkPMSrcOver_SSE2()

/external/skia/include/private/
  SkNx_sse.h
    263  AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    333  AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    380  AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    716  __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
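The first three matches simply back SkNx's operator<< with the intrinsic. The line-716 pair is a sign-extension idiom: shifting left by 16 and then arithmetically right by 16 sign-extends the low 16 bits of every 32-bit lane, handy on SSE2 targets that lack SSE4.1's _mm_cvtepi16_epi32. A minimal sketch:

    #include <emmintrin.h>

    /* Sketch: sign-extend the low 16 bits of each 32-bit lane. slli pushes
     * bit 15 into the sign position; srai drags it back down while
     * replicating it through the upper half. */
    static inline __m128i sign_extend_lo16_sketch(__m128i v)
    {
        return _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
    }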

/external/skqp/include/private/
  SkNx_sse.h
    263  AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    333  AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    380  AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    716  __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);