/external/lzma/C/
D | AesOpt.c
      25  m = _mm_xor_si128(m, *data);  in AesCbc_Encode_Intel()
      26  m = _mm_xor_si128(m, p[2]);  in AesCbc_Encode_Intel()
      65  m0 = _mm_xor_si128(t, data[0]);  in AesCbc_Decode_Intel()
      66  m1 = _mm_xor_si128(t, data[1]);  in AesCbc_Decode_Intel()
      67  m2 = _mm_xor_si128(t, data[2]);  in AesCbc_Decode_Intel()
      82  t = _mm_xor_si128(m0, iv); iv = data[0]; data[0] = t;  in AesCbc_Decode_Intel()
      83  t = _mm_xor_si128(m1, iv); iv = data[1]; data[1] = t;  in AesCbc_Decode_Intel()
      84  t = _mm_xor_si128(m2, iv); iv = data[2]; data[2] = t;  in AesCbc_Decode_Intel()
      91  __m128i m = _mm_xor_si128(w[2], *data);  in AesCbc_Decode_Intel()
     103  m = _mm_xor_si128(m, iv);  in AesCbc_Decode_Intel()
    [all …]
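These XORs are the CBC chaining step: on encryption each plaintext block is XORed with the previous ciphertext block (or the IV) before the AES rounds run; on decryption the previous ciphertext is XORed back in afterwards. A minimal single-block sketch of the encrypt side using the AES-NI intrinsics; the round-key array rk[0..10] (AES-128, already expanded) is an illustrative assumption, not the layout AesOpt.c actually uses:

    #include <wmmintrin.h>  /* AES-NI */

    /* Encrypt one 16-byte block in CBC mode (AES-128, 10 rounds).
       rk[0..10] must hold the expanded round keys. */
    static __m128i cbc_encrypt_block(__m128i plain, __m128i iv, const __m128i rk[11])
    {
        __m128i m = _mm_xor_si128(plain, iv);    /* CBC chaining XOR */
        m = _mm_xor_si128(m, rk[0]);             /* initial AddRoundKey */
        for (int r = 1; r < 10; r++)
            m = _mm_aesenc_si128(m, rk[r]);      /* middle rounds */
        return _mm_aesenclast_si128(m, rk[10]);  /* result becomes the next IV */
    }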
|
/external/epid-sdk/ext/ipp/sources/ippcp/ |
D | pcpsms4ecby8cn.h
      85  return _mm_xor_si128(T0, T1);  in affine()
      91  T = _mm_xor_si128(T, _mm_srli_epi32 (x,19));  in Ltag()
      92  T = _mm_xor_si128(T, _mm_slli_epi32 (x,23));  in Ltag()
      93  T = _mm_xor_si128(T, _mm_srli_epi32 (x, 9));  in Ltag()
     126  T = _mm_xor_si128(T, _mm_srli_epi32 (x,30));  in L()
     128  T = _mm_xor_si128(T, _mm_slli_epi32 (x,10));  in L()
     129  T = _mm_xor_si128(T, _mm_srli_epi32 (x,22));  in L()
     131  T = _mm_xor_si128(T, _mm_slli_epi32 (x,18));  in L()
     132  T = _mm_xor_si128(T, _mm_srli_epi32 (x,14));  in L()
     134  T = _mm_xor_si128(T, _mm_slli_epi32 (x,24));  in L()
    [all …]
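Ltag() and L() are SM4's linear transforms. SSE2 has no vector rotate, so every rotation is a shift pair joined by XOR; the srli/slli amounts above pair up as 32 - n and n. Ltag() is the key-schedule transform L'(B) = B ^ (B <<< 13) ^ (B <<< 23), and L() is the round transform L(B) = B ^ (B <<< 2) ^ (B <<< 10) ^ (B <<< 18) ^ (B <<< 24). A sketch of the round transform (helper names are illustrative):

    #include <emmintrin.h>

    /* 32-bit lane-wise rotate-left built from two shifts and an XOR. */
    #define ROTL32(x, n) \
        _mm_xor_si128(_mm_slli_epi32((x), (n)), _mm_srli_epi32((x), 32 - (n)))

    /* SM4 round linear transform: L(B) = B ^ B<<<2 ^ B<<<10 ^ B<<<18 ^ B<<<24 */
    static __m128i sm4_L(__m128i b)
    {
        __m128i t = _mm_xor_si128(b, ROTL32(b, 2));
        t = _mm_xor_si128(t, ROTL32(b, 10));
        t = _mm_xor_si128(t, ROTL32(b, 18));
        return _mm_xor_si128(t, ROTL32(b, 24));
    }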
|
/external/scrypt/lib/crypto/ |
D | crypto_scrypt-sse.c
      77  D[i] = _mm_xor_si128(D[i], S[i]);  in blkxor()
      99  X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));  in salsa20_8()
     100  X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));  in salsa20_8()
     102  X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));  in salsa20_8()
     103  X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));  in salsa20_8()
     105  X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));  in salsa20_8()
     106  X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));  in salsa20_8()
     108  X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));  in salsa20_8()
     109  X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));  in salsa20_8()
     118  X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));  in salsa20_8()
    [all …]
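salsa20_8() spells each Salsa20 rotation as the same slli/srli/XOR pair; the shift pairs (7, 25), (9, 23), (13, 19) and (18, 14) are the cipher's rotation constants 7, 9, 13, 18. A sketch of one column step of the quarter-round, four lanes at a time (the diagonal shuffles between steps are omitted):

    #include <emmintrin.h>

    #define ROTL32(x, n) \
        _mm_xor_si128(_mm_slli_epi32((x), (n)), _mm_srli_epi32((x), 32 - (n)))

    /* One Salsa20 column step: X1 ^= (X0+X3)<<<7, X2 ^= (X1+X0)<<<9,
       X3 ^= (X2+X1)<<<13, X0 ^= (X3+X2)<<<18.  The paired shift/XOR
       lines in the listing are these rotations written out. */
    static void salsa_column_step(__m128i *X0, __m128i *X1, __m128i *X2, __m128i *X3)
    {
        __m128i T;
        T = _mm_add_epi32(*X0, *X3); *X1 = _mm_xor_si128(*X1, ROTL32(T, 7));
        T = _mm_add_epi32(*X1, *X0); *X2 = _mm_xor_si128(*X2, ROTL32(T, 9));
        T = _mm_add_epi32(*X2, *X1); *X3 = _mm_xor_si128(*X3, ROTL32(T, 13));
        T = _mm_add_epi32(*X3, *X2); *X0 = _mm_xor_si128(*X0, ROTL32(T, 18));
    }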
|
/external/python/cpython3/Modules/_blake2/impl/ |
D | blake2b-round.h
      35  : (-(c) == 63) ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_add_epi64((x), (x))) \
      36  : _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_slli_epi64((x), 64-(-(c))))
      38  #define _mm_roti_epi64(r, c) _mm_xor_si128(_mm_srli_epi64( (r), -(c) ),_mm_slli_epi64( (r), 64-(-(c…
      50  row4l = _mm_xor_si128(row4l, row1l); \
      51  row4h = _mm_xor_si128(row4h, row1h); \
      59  row2l = _mm_xor_si128(row2l, row3l); \
      60  row2h = _mm_xor_si128(row2h, row3h); \
      69  row4l = _mm_xor_si128(row4l, row1l); \
      70  row4h = _mm_xor_si128(row4h, row1h); \
      78  row2l = _mm_xor_si128(row2l, row3l); \
    [all …]
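blake2b-round.h emulates the 64-bit rotate that SSE2 lacks as two shifts joined by XOR (BLAKE2b rotates by 32, 24, 16 and 63). The (-(c) == 63) special case exploits x + x == x << 1: rotating right by 63 is rotating left by 1, so a paddq can stand in for the left shift. Both cases unrolled as plain functions:

    #include <emmintrin.h>

    /* Rotate each 64-bit lane right by c (1..63): shift pair + XOR. */
    static __m128i rotr64(__m128i x, int c)
    {
        return _mm_xor_si128(_mm_srli_epi64(x, c), _mm_slli_epi64(x, 64 - c));
    }

    /* c == 63: rotr(x,63) == rotl(x,1), and x << 1 == x + x, so the
       left shift becomes an add (the (-(c) == 63) branch above). */
    static __m128i rotr64_63(__m128i x)
    {
        return _mm_xor_si128(_mm_srli_epi64(x, 63), _mm_add_epi64(x, x));
    }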
|
D | blake2s-round.h
      34  : _mm_xor_si128(_mm_srli_epi32( (r), -(c) ),_mm_slli_epi32( (r), 32-(-(c)) )) )
      36  #define _mm_roti_epi32(r, c) _mm_xor_si128(_mm_srli_epi32( (r), -(c) ),_mm_slli_epi32( (r), 32-(-(c…
      45  row4 = _mm_xor_si128( row4, row1 ); \
      48  row2 = _mm_xor_si128( row2, row3 ); \
      53  row4 = _mm_xor_si128( row4, row1 ); \
      56  row2 = _mm_xor_si128( row2, row3 ); \
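The same rotate-by-shift-pair emulation, 32-bit this time, for BLAKE2s's rotation constants 16, 12, 8 and 7. For the byte-aligned amounts (16 and 8) a single pshufb can replace the three-instruction sequence, an SSSE3 alternative commonly used in BLAKE2s implementations; the r16 permutation constant below is the standard byte swap for rotr-by-16 and is included here as an assumption, not quoted from this header:

    #include <tmmintrin.h>  /* SSSE3 */

    /* SSE2 fallback: generic 32-bit rotate-right via shifts + XOR. */
    static __m128i rotr32(__m128i x, int c)
    {
        return _mm_xor_si128(_mm_srli_epi32(x, c), _mm_slli_epi32(x, 32 - c));
    }

    /* SSSE3: rotr by 16 swaps the two 16-bit halves of each 32-bit
       lane, which one byte shuffle expresses directly. */
    static __m128i rotr32_16(__m128i x)
    {
        const __m128i r16 = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10,
                                         5, 4, 7, 6, 1, 0, 3, 2);
        return _mm_shuffle_epi8(x, r16);
    }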
|
D | blake2b.c
     304  row4l = _mm_xor_si128( LOADU( &blake2b_IV[4] ), LOADU( &S->t[0] ) );  in blake2b_compress()
     305  row4h = _mm_xor_si128( LOADU( &blake2b_IV[6] ), LOADU( &S->f[0] ) );  in blake2b_compress()
     318  row1l = _mm_xor_si128( row3l, row1l );  in blake2b_compress()
     319  row1h = _mm_xor_si128( row3h, row1h );  in blake2b_compress()
     320  STOREU( &S->h[0], _mm_xor_si128( LOADU( &S->h[0] ), row1l ) );  in blake2b_compress()
     321  STOREU( &S->h[2], _mm_xor_si128( LOADU( &S->h[2] ), row1h ) );  in blake2b_compress()
     322  row2l = _mm_xor_si128( row4l, row2l );  in blake2b_compress()
     323  row2h = _mm_xor_si128( row4h, row2h );  in blake2b_compress()
     324  STOREU( &S->h[4], _mm_xor_si128( LOADU( &S->h[4] ), row2l ) );  in blake2b_compress()
     325  STOREU( &S->h[6], _mm_xor_si128( LOADU( &S->h[6] ), row2h ) );  in blake2b_compress()
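Lines 304-305 fold the block counter t and the finalization flags f into the IV half of the working state; lines 318-325 are BLAKE2's feed-forward, folding both halves of the working state back into the chaining value. In scalar form the feed-forward is simply the following, which the STOREU/_mm_xor_si128 pairs above perform two 64-bit words at a time:

    #include <stdint.h>

    /* BLAKE2b feed-forward at the end of compress: h[i] ^= v[i] ^ v[i+8]. */
    static void blake2b_feed_forward(uint64_t h[8], const uint64_t v[16])
    {
        for (int i = 0; i < 8; i++)
            h[i] ^= v[i] ^ v[i + 8];
    }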
|
D | blake2s.c
     287  …row4 = _mm_xor_si128( _mm_setr_epi32( 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 ), LOADU( &S-…  in blake2s_compress()
     298  STOREU( &S->h[0], _mm_xor_si128( ff0, _mm_xor_si128( row1, row3 ) ) );  in blake2s_compress()
     299  STOREU( &S->h[4], _mm_xor_si128( ff1, _mm_xor_si128( row2, row4 ) ) );  in blake2s_compress()
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | vp8_quantize_sse2.c
      66  x0 = _mm_xor_si128(z0, sz0);  in vp8_regular_quantize_b_sse2()
      67  x1 = _mm_xor_si128(z1, sz1);  in vp8_regular_quantize_b_sse2()
     101  y0 = _mm_xor_si128(y0, sz0);  in vp8_regular_quantize_b_sse2()
     102  y1 = _mm_xor_si128(y1, sz1);  in vp8_regular_quantize_b_sse2()
     163  x0 = _mm_xor_si128(z0, sz0);  in vp8_fast_quantize_b_sse2()
     164  x1 = _mm_xor_si128(z1, sz1);  in vp8_fast_quantize_b_sse2()
     177  y0 = _mm_xor_si128(y0, sz0);  in vp8_fast_quantize_b_sse2()
     178  y1 = _mm_xor_si128(y1, sz1);  in vp8_fast_quantize_b_sse2()
     202  x0 = _mm_xor_si128(x0, ones);  in vp8_fast_quantize_b_sse2()
     203  x1 = _mm_xor_si128(x1, ones);  in vp8_fast_quantize_b_sse2()
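sz0/sz1 are per-lane sign masks, and the XOR pairs are the branch-free sign-magnitude round trip: |z| = (z ^ sz) - sz before quantization, and the same form restores the sign afterwards (the x ^ ones lines are a plain bitwise NOT used for the eob computation). A sketch of the idiom, which the vp9 and aom quantizers further down in this listing use as well:

    #include <emmintrin.h>

    /* Branch-free absolute value for 16-bit lanes.  sz is 0xFFFF where
       z is negative, so (z ^ sz) - sz == ~z + 1 == -z there, and == z
       elsewhere. */
    static __m128i abs_epi16_sse2(__m128i z, __m128i *sz_out)
    {
        __m128i sz = _mm_cmplt_epi16(z, _mm_setzero_si128()); /* sign mask */
        *sz_out = sz;
        return _mm_sub_epi16(_mm_xor_si128(z, sz), sz);       /* |z| */
    }

    /* After quantizing the magnitudes, the same mask puts the sign
       back: y = (|q| ^ sz) - sz. */
    static __m128i copysign_epi16_sse2(__m128i q, __m128i sz)
    {
        return _mm_sub_epi16(_mm_xor_si128(q, sz), sz);
    }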
|
D | quantize_sse4.c
      61  x0 = _mm_xor_si128(z0, sz0);  in vp8_regular_quantize_b_sse4_1()
      62  x1 = _mm_xor_si128(z1, sz1);  in vp8_regular_quantize_b_sse4_1()
      93  y0 = _mm_xor_si128(y0, sz0);  in vp8_regular_quantize_b_sse4_1()
      94  y1 = _mm_xor_si128(y1, sz1);  in vp8_regular_quantize_b_sse4_1()
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_quantize_sse2.c
      64  qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);  in vp9_quantize_fp_sse2()
      65  qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);  in vp9_quantize_fp_sse2()
      77  qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);  in vp9_quantize_fp_sse2()
      78  qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);  in vp9_quantize_fp_sse2()
     131  qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);  in vp9_quantize_fp_sse2()
     132  qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);  in vp9_quantize_fp_sse2()
     146  qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);  in vp9_quantize_fp_sse2()
     147  qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);  in vp9_quantize_fp_sse2()
|
D | vp9_highbd_block_error_intrin_sse2.c
      36  cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),  in vp9_highbd_block_error_sse2()
      38  cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),  in vp9_highbd_block_error_sse2()
      40  cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),  in vp9_highbd_block_error_sse2()
      42  cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),  in vp9_highbd_block_error_sse2()
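The second argument of each XOR is cut off in this listing; the usual shape of this check flags lanes that fall outside [min, max], and because the two compares can never both be true, XOR serves as OR. A sketch under that assumption:

    #include <emmintrin.h>

    /* Flag 32-bit lanes outside [min, max].  (v > max) and (v < min)
       are mutually exclusive, so XOR of the two masks equals their OR. */
    static __m128i out_of_range_epi32(__m128i v, __m128i min, __m128i max)
    {
        return _mm_xor_si128(_mm_cmpgt_epi32(v, max),
                             _mm_cmplt_epi32(v, min));
    }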
|
D | vp9_dct_ssse3.c
     316  qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);  in vp9_fdct8x8_quant_ssse3()
     317  qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);  in vp9_fdct8x8_quant_ssse3()
     329  qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);  in vp9_fdct8x8_quant_ssse3()
     330  qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);  in vp9_fdct8x8_quant_ssse3()
     384  qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);  in vp9_fdct8x8_quant_ssse3()
     385  qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);  in vp9_fdct8x8_quant_ssse3()
     399  qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);  in vp9_fdct8x8_quant_ssse3()
     400  qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);  in vp9_fdct8x8_quant_ssse3()
|
/external/fec/ |
D | viterbi615_sse2.c
     143  …m0 = _mm_add_epi16(_mm_xor_si128(Branchtab615[0].v[i],sym0v),_mm_xor_si128(Branchtab615[1].v[i],sy…  in update_viterbi615_blk_sse2()
     144  …m1 = _mm_add_epi16(_mm_xor_si128(Branchtab615[2].v[i],sym2v),_mm_xor_si128(Branchtab615[3].v[i],sy…  in update_viterbi615_blk_sse2()
     145  …m2 = _mm_add_epi16(_mm_xor_si128(Branchtab615[4].v[i],sym4v),_mm_xor_si128(Branchtab615[5].v[i],sy…  in update_viterbi615_blk_sse2()
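These are soft-decision branch metrics computed branch-free: if a branch-table word is all-zeros, XOR leaves the received symbol unchanged (distance from an expected 0 bit); if it is full-scale (all-ones within the symbol's range), sym ^ full == full - sym, the distance from an expected 1 bit. The exact scaling of fec's tables is an assumption here, but the shape of the computation matches the listing:

    #include <emmintrin.h>

    /* Metric for one trellis branch over two received symbols, as in
       the m0/m1/m2 lines above: XOR selects sym or full-scale - sym
       per lane, and the add accumulates across symbols. */
    static __m128i branch_metric2(__m128i bt0, __m128i sym0,
                                  __m128i bt1, __m128i sym1)
    {
        return _mm_add_epi16(_mm_xor_si128(bt0, sym0),
                             _mm_xor_si128(bt1, sym1));
    }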
|
/external/neven/Embedded/common/src/b_BasicEm/ |
D | MathSSE2.c
      44  m_XMM4 = _mm_xor_si128( m_XMM4, m_XMM4 );  in bbs_dotProduct_64SSE2()
      45  m_XMM6 = _mm_xor_si128( m_XMM6, m_XMM6 );  in bbs_dotProduct_64SSE2()
      46  m_XMM7 = _mm_xor_si128( m_XMM7, m_XMM7 );  in bbs_dotProduct_64SSE2()
     155  m_XMM5 = _mm_xor_si128( m_XMM5, m_XMM5 );  in bbs_dotProduct_128SSE2()
     156  m_XMM6 = _mm_xor_si128( m_XMM6, m_XMM6 );  in bbs_dotProduct_128SSE2()
     258  m_XMM5 = _mm_xor_si128( m_XMM5, m_XMM5 );  in bbs_dotProduct_u128SSE2()
     259  m_XMM6 = _mm_xor_si128( m_XMM6, m_XMM6 );  in bbs_dotProduct_u128SSE2()
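These self-XORs are just accumulator zeroing: x ^ x == 0 for any x, and CPUs recognize pxor-with-self as a dependency-breaking zeroing idiom. _mm_setzero_si128() is the portable spelling and compiles to the same instruction:

    #include <emmintrin.h>

    /* Zero an accumulator the way the dot-product setup above does it.
       Equivalent to _mm_setzero_si128(). */
    static __m128i zeroed(__m128i x)
    {
        return _mm_xor_si128(x, x);  /* pxor xmm, xmm */
    }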
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | loopfilter_sse2.c
      67  ps1ps0 = _mm_xor_si128(p1p0, t80); /* ^ 0x80 */ \
      68  qs1qs0 = _mm_xor_si128(q1q0, t80); \
     103  qs1qs0 = _mm_xor_si128(qs1qs0, t80); /* ^ 0x80 */ \
     104  ps1ps0 = _mm_xor_si128(ps1ps0, t80); /* ^ 0x80 */ \
     272  hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);  in vpx_lpf_horizontal_16_sse2()
     277  mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);  in vpx_lpf_horizontal_16_sse2()
     296  __m128i qs1ps1 = _mm_xor_si128(q1p1, t80);  in vpx_lpf_horizontal_16_sse2()
     297  __m128i qs0ps0 = _mm_xor_si128(q0p0, t80);  in vpx_lpf_horizontal_16_sse2()
     298  __m128i qs0 = _mm_xor_si128(p0q0, t80);  in vpx_lpf_horizontal_16_sse2()
     299  __m128i qs1 = _mm_xor_si128(p1q1, t80);  in vpx_lpf_horizontal_16_sse2()
    [all …]
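Two XOR tricks carry this loop filter. First, m ^ ff (with ff all-ones from a self-compare) is bitwise NOT, turning an is-zero mask into an is-nonzero mask. Second, XOR with t80 = 0x80 flips the bias: pixels in [0, 255] map onto signed [-128, 127], so the filter taps can use saturating signed arithmetic, and the same XOR maps the result back, as on lines 117/125 of the AVX2 version below. Sketches of both:

    #include <emmintrin.h>

    /* Bitwise NOT of a compare result: a self-compare is always true,
       so ff is all-ones and m ^ ff flips every bit. */
    static __m128i not_mask(__m128i m)
    {
        const __m128i ff = _mm_cmpeq_epi8(m, m);
        return _mm_xor_si128(m, ff);
    }

    /* Bias flip around 0x80: unsigned pixels -> signed lanes, apply a
       clamped tap, then back to pixel range. */
    static __m128i apply_filter(__m128i q, __m128i filt)
    {
        const __m128i t80 = _mm_set1_epi8((char)0x80);
        __m128i qs = _mm_xor_si128(q, t80);   /* pixels -> signed */
        qs = _mm_adds_epi8(qs, filt);         /* saturating filter tap */
        return _mm_xor_si128(qs, t80);        /* signed -> pixels */
    }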
|
D | loopfilter_avx2.c
      63  hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);  in vpx_lpf_horizontal_16_avx2()
      68  mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);  in vpx_lpf_horizontal_16_avx2()
      89  __m128i qs1ps1 = _mm_xor_si128(q1p1, t80);  in vpx_lpf_horizontal_16_avx2()
      90  __m128i qs0ps0 = _mm_xor_si128(q0p0, t80);  in vpx_lpf_horizontal_16_avx2()
      91  __m128i qs0 = _mm_xor_si128(p0q0, t80);  in vpx_lpf_horizontal_16_avx2()
      92  __m128i qs1 = _mm_xor_si128(p1q1, t80);  in vpx_lpf_horizontal_16_avx2()
     117  qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80);  in vpx_lpf_horizontal_16_avx2()
     125  qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80);  in vpx_lpf_horizontal_16_avx2()
     435  hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);  in vpx_lpf_horizontal_16_dual_avx2()
     440  mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);  in vpx_lpf_horizontal_16_dual_avx2()
    [all …]
|
/external/libaom/libaom/av1/encoder/x86/ |
D | highbd_block_error_intrin_sse2.c
      36  cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),  in av1_highbd_block_error_sse2()
      38  cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),  in av1_highbd_block_error_sse2()
      40  cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),  in av1_highbd_block_error_sse2()
      42  cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),  in av1_highbd_block_error_sse2()
|
D | av1_quantize_sse2.c
      86  __m128i qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);  in quantize()
      87  __m128i qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);  in quantize()
     103  qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);  in quantize()
     104  qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);  in quantize()
|
/external/jemalloc/test/include/test/ |
D | SFMT-sse2.h
      71  z = _mm_xor_si128(z, x);  in mm_recursion()
      72  z = _mm_xor_si128(z, v);  in mm_recursion()
      75  z = _mm_xor_si128(z, x);  in mm_recursion()
      76  z = _mm_xor_si128(z, y);  in mm_recursion()
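mm_recursion() assembles the SFMT state update out of these four XORs: r = a ^ (a shifted left by SL2 bytes) ^ ((b >> SR1) & mask) ^ (c shifted right by SR2 bytes) ^ (d << SL1). A flattened sketch with the SFMT-19937 parameter set (SL1 = 18, SL2 = 1, SR1 = 11, SR2 = 1; quoted from memory, so treat the constants as illustrative):

    #include <emmintrin.h>

    /* One SFMT state-update step.  Note the byte-wise shifts
       (slli/srli_si128) versus the bit-wise per-lane shifts. */
    static __m128i sfmt_recursion(__m128i a, __m128i b, __m128i c, __m128i d,
                                  __m128i mask)
    {
        __m128i z = _mm_xor_si128(a, _mm_slli_si128(a, 1));               /* a << SL2 bytes */
        z = _mm_xor_si128(z, _mm_and_si128(_mm_srli_epi32(b, 11), mask)); /* (b >> SR1) & mask */
        z = _mm_xor_si128(z, _mm_srli_si128(c, 1));                       /* c >> SR2 bytes */
        return _mm_xor_si128(z, _mm_slli_epi32(d, 18));                   /* d << SL1 bits */
    }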
|
/external/jemalloc_new/test/include/test/ |
D | SFMT-sse2.h
      71  z = _mm_xor_si128(z, x);  in mm_recursion()
      72  z = _mm_xor_si128(z, v);  in mm_recursion()
      75  z = _mm_xor_si128(z, x);  in mm_recursion()
      76  z = _mm_xor_si128(z, y);  in mm_recursion()
|
/external/libavc/common/x86/ |
D | ih264_deblk_luma_ssse3.c
     385  _mm_xor_si128(flag1_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     389  _mm_xor_si128(flag1_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     395  _mm_xor_si128(flag3_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     399  _mm_xor_si128(flag4_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     405  _mm_xor_si128(flag3_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     409  _mm_xor_si128(flag4_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     415  _mm_xor_si128(flag3_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     419  _mm_xor_si128(flag4_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_vert_bs4_ssse3()
     770  _mm_xor_si128(flag1_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_horz_bs4_ssse3()
     774  _mm_xor_si128(flag1_16x8, _mm_set1_epi8(0xFF)));  in ih264_deblk_luma_horz_bs4_ssse3()
    [all …]
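flag ^ 0xFF.. is the inverted-mask half of a bitwise select, out = (a & flag) | (b & ~flag), which is how these deblockers blend filtered and unfiltered pixels without SSE4.1's pblendvb:

    #include <emmintrin.h>

    /* Byte-wise select: keep lanes of a where flag is 0xFF, lanes of b
       where it is 0x00.  ~flag is spelled flag ^ set1(0xFF), exactly
       the XORs in the listing. */
    static __m128i select_epi8(__m128i flag, __m128i a, __m128i b)
    {
        __m128i not_flag = _mm_xor_si128(flag, _mm_set1_epi8((char)0xFF));
        return _mm_or_si128(_mm_and_si128(a, flag),
                            _mm_and_si128(b, not_flag));
    }

_mm_andnot_si128(flag, b) would fold the NOT and AND into one instruction; materializing the inverted mask pays off when, as here, it is reused across several blends.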
|
D | ih264_deblk_chroma_ssse3.c
     208  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_vert_bs4_ssse3()
     213  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_vert_bs4_ssse3()
     377  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_horz_bs4_ssse3()
     383  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_horz_bs4_ssse3()
     461  flag_bs = _mm_xor_si128(flag_bs, _mm_set1_epi8(0xFF)); //Invert for required mask  in ih264_deblk_chroma_vert_bslt4_ssse3()
     576  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_vert_bslt4_ssse3()
     581  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_vert_bslt4_ssse3()
     688  flag_bs = _mm_xor_si128(flag_bs, _mm_set1_epi8(0xFF)); //Invert for required mask  in ih264_deblk_chroma_horz_bslt4_ssse3()
     782  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_horz_bslt4_ssse3()
     788  _mm_xor_si128(flag1, _mm_set1_epi8(0xFF)));  in ih264_deblk_chroma_horz_bslt4_ssse3()
    [all …]
|
/external/libjpeg-turbo/simd/x86_64/ |
D | jchuff-sse2.asm
     145  pxor %34, xmm8   ; x1 = _mm_xor_si128(x1, neg);
     146  pxor %35, xmm9   ; x1 = _mm_xor_si128(x1, neg);
     147  pxor %36, xmm10  ; x1 = _mm_xor_si128(x1, neg);
     148  pxor %37, xmm11  ; x1 = _mm_xor_si128(x1, neg);
     149  pxor xmm8, %34   ; neg = _mm_xor_si128(neg, x1);
     150  pxor xmm9, %35   ; neg = _mm_xor_si128(neg, x1);
     151  pxor xmm10, %36  ; neg = _mm_xor_si128(neg, x1);
     152  pxor xmm11, %37  ; neg = _mm_xor_si128(neg, x1);
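JPEG entropy coding needs |v| (to size the Huffman code) and, for a negative v, the value v - 1, which within |v|'s bit-length is the one's complement of |v|. Given a per-lane sign mask, both fall out branch-free, which is the idea the interleaved pxor pairs above compute across four registers at once. A per-lane C sketch of the trick (the asm schedules and interleaves it differently):

    #include <emmintrin.h>

    /* Branch-free JPEG magnitude and diminished code value.
       For v = -3: sign = 0xFFFF, mag = (v + sign) ^ sign = ~(-4) = 3,
       code = 3 ^ 0xFFFF = -4, whose low 2 bits (00) are the JPEG code. */
    static void jpeg_mag_and_code(__m128i v, __m128i *mag, __m128i *code)
    {
        __m128i sign = _mm_cmpgt_epi16(_mm_setzero_si128(), v); /* 0xFFFF if v < 0 */
        *mag  = _mm_xor_si128(_mm_add_epi16(v, sign), sign);    /* |v| */
        *code = _mm_xor_si128(*mag, sign);                      /* v >= 0: v;  v < 0: v - 1 */
    }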
|
/external/webp/src/dsp/ |
D | upsampling_sse41.c
      46  const __m128i tmp2 = _mm_xor_si128(k, (in)); /* (k^in) */ \
      72  const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \
      74  const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \
      75  const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \
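These XOR terms feed WebP's exact 2x upsampler, which leans on the identity a + b == 2*(a & b) + (a ^ b): the AND collects the carries, the XOR is the carry-less sum, so an average needs no widening to 16 bits, and the XOR's low bit is precisely the rounding correction between a floor average and _mm_avg_epu8's round-up. The identity as code:

    #include <emmintrin.h>

    /* Floor average of unsigned bytes without widening:
       (a + b) >> 1 == (a & b) + ((a ^ b) >> 1).
       SSE2 has no per-byte shift, so shift 16-bit lanes and mask off
       the bits that crossed byte boundaries. */
    static __m128i avg_floor_epu8(__m128i a, __m128i b)
    {
        __m128i x = _mm_xor_si128(a, b);                    /* carry-less sum */
        __m128i half = _mm_and_si128(_mm_srli_epi16(x, 1),
                                     _mm_set1_epi8(0x7F));  /* byte-wise >> 1 */
        return _mm_add_epi8(_mm_and_si128(a, b), half);     /* + carries */
    }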
|
/external/flac/libFLAC/ |
D | fixed_intrin_sse2.c
     103  err0 = _mm_xor_si128(err0, tmp);  in FLAC__fixed_compute_best_predictor_intrin_sse2()
     106  err1 = _mm_xor_si128(err1, tmp);  in FLAC__fixed_compute_best_predictor_intrin_sse2()
     200  err0 = _mm_xor_si128(err0, tmp);  in FLAC__fixed_compute_best_predictor_wide_intrin_sse2()
     203  err1 = _mm_xor_si128(err1, tmp);  in FLAC__fixed_compute_best_predictor_wide_intrin_sse2()
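tmp is a sign mask here, and the XOR is half of the (x ^ s) - s absolute-value idiom used while summing |residual| for each fixed predictor order. The wide variant is the interesting case, since SSE2 has neither a 64-bit abs nor a 64-bit arithmetic shift; a sketch of how the sign mask can be synthesized for 64-bit lanes (the exact shuffle FLAC uses may differ):

    #include <emmintrin.h>

    /* |x| for signed 64-bit lanes on plain SSE2: take the sign of each
       lane's high 32 bits, broadcast it across the lane, then apply
       the usual (x ^ s) - s. */
    static __m128i abs_epi64_sse2(__m128i x)
    {
        __m128i s = _mm_srai_epi32(x, 31);                 /* per-32-bit sign bits */
        s = _mm_shuffle_epi32(s, _MM_SHUFFLE(3, 3, 1, 1)); /* copy high-half sign down */
        return _mm_sub_epi64(_mm_xor_si128(x, s), s);
    }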
|