
Searched refs:xmm0 (Results 1 – 25 of 409) sorted by relevance


/external/llvm/test/MC/X86/
x86_64-fma4-encoding.s
6 vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
10 vfmaddss %xmm1, (%rcx),%xmm0, %xmm0
14 vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
18 vfmaddsd (%rcx), %xmm1, %xmm0, %xmm0
22 vfmaddsd %xmm1, (%rcx),%xmm0, %xmm0
26 vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
30 vfmaddsd %xmm10, %xmm1, %xmm0, %xmm0
34 vfmaddps (%rcx), %xmm1, %xmm0, %xmm0
38 vfmaddps %xmm1, (%rcx),%xmm0, %xmm0
42 vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
[all …]
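
The lines above are assembler round-trip tests for FMA4's four-operand scalar and packed forms: vfmaddss computes dst = src1*src2 + src3 with a single rounding. A minimal scalar sketch of that arithmetic in C, using the standard fmaf from math.h rather than the FMA4 intrinsics:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        float a = 1.5f, b = 2.0f, c = 0.25f;
        /* vfmaddss-style arithmetic: a*b + c, rounded once */
        float fused = fmaf(a, b, c);
        /* separate multiply and add: same value here, but rounded twice */
        float unfused = a * b + c;
        printf("fused=%f unfused=%f\n", fused, unfused);
        return 0;
    }
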
shuffle-comments.s
3 palignr $8, %xmm0, %xmm1
4 # CHECK: xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
8 palignr $16, %xmm0, %xmm1
13 palignr $0, %xmm0, %xmm1
14 # CHECK: xmm1 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
18 vpalignr $8, %xmm0, %xmm1, %xmm2
19 # CHECK: xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
23 vpalignr $16, %xmm0, %xmm1, %xmm2
28 vpalignr $0, %xmm0, %xmm1, %xmm2
29 # CHECK: xmm2 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
[all …]
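
The CHECK comments above spell out palignr's byte semantics: the two sources are concatenated and shifted right by the immediate, counted in bytes. A self-contained sketch with the SSSE3 intrinsic (compile with -mssse3; note _mm_alignr_epi8 takes the high half first, mirroring the AT&T operand order):

    #include <stdio.h>
    #include <tmmintrin.h>  /* SSSE3: _mm_alignr_epi8 */

    int main(void) {
        unsigned char lo[16], hi[16], out[16];
        for (int i = 0; i < 16; i++) { lo[i] = i; hi[i] = 16 + i; }
        __m128i vhi = _mm_loadu_si128((const __m128i *)hi);
        __m128i vlo = _mm_loadu_si128((const __m128i *)lo);
        /* palignr $8: result = lo[8..15] followed by hi[0..7] */
        __m128i r = _mm_alignr_epi8(vhi, vlo, 8);
        _mm_storeu_si128((__m128i *)out, r);
        for (int i = 0; i < 16; i++) printf("%d ", out[i]);  /* 8..15 16..23 */
        printf("\n");
        return 0;
    }
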
/external/valgrind/none/tests/amd64/
pcmpxstrx64.stdout.exp
3 istri $0x4A: xmm0 55555555555555555555555555555555 rcx 5555555555550006 flags 00000881
4 istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
5 istrm $0x4A: xmm0 000000000000000000ffffffffffffff rcx 5555555555555555 flags 00000881
6 istrm $0x0A: xmm0 0000000000000000000000000000007f rcx 5555555555555555 flags 00000881
7 estri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000008c1
8 estri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
9 estrm $0x4A: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
10 estrm $0x0A: xmm0 0000000000000000000000000000ffff rcx 5555555555555555 flags 000008c1
13 istri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000000c1
14 istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
[all …]
pcmpxstrx64w.stdout.exp
3 istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550002 flags 00000881
4 istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
5 istrm $0x4B: xmm0 00000000000000000000ffffffffffff rcx 5555555555555555 flags 00000881
6 istrm $0x0B: xmm0 00000000000000000000000000000007 rcx 5555555555555555 flags 00000881
7 estri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000008c1
8 estri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
9 estrm $0x4B: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
10 estrm $0x0B: xmm0 000000000000000000000000000000ff rcx 5555555555555555 flags 000008c1
13 istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
14 istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550003 flags 000000c1
[all …]
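
Both expected-output files exercise the SSE4.2 string-compare family: the imm8 selects element width (the w file's $0x4B modes operate on words, $0x4A on bytes), the aggregation, the polarity, and whether rcx receives the least- or most-significant match index. A small sketch with the C intrinsic and the _SIDD_* macros (the mode below is illustrative, not a decode of the immediates above):

    #include <stdio.h>
    #include <nmmintrin.h>  /* SSE4.2: _mm_cmpistri */

    int main(void) {
        char a[16] = "hello, world!!!!";   /* exactly 16 bytes, no NUL */
        char b[16] = "hello, World!!!!";
        __m128i va = _mm_loadu_si128((const __m128i *)a);
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        /* equal-each with negated polarity: index of the first mismatch */
        int idx = _mm_cmpistri(va, vb,
                               _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
                               _SIDD_NEGATIVE_POLARITY | _SIDD_LEAST_SIGNIFICANT);
        printf("first difference at byte %d\n", idx);  /* 7: 'w' vs 'W' */
        return 0;
    }
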
/external/llvm/test/CodeGen/X86/
vector-shuffle-128-v8.ll
13 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
18 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
26 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
31 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
39 ; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
44 ; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
45 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
50 ; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
51 ; SSE41-NEXT: movdqa %xmm1, %xmm0
56 ; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
[all …]
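
Most of these lowerings reduce to one pshufd, whose immediate packs four 2-bit dword-lane indices. A sketch of the first pattern above with intrinsics:

    #include <stdio.h>
    #include <emmintrin.h>  /* SSE2: _mm_shuffle_epi32 */

    int main(void) {
        __m128i v = _mm_setr_epi32(10, 11, 12, 13);
        /* pshufd xmm0 = xmm0[0,0,1,1]; _MM_SHUFFLE lists lanes high-to-low */
        __m128i r = _mm_shuffle_epi32(v, _MM_SHUFFLE(1, 1, 0, 0));
        int out[4];
        _mm_storeu_si128((__m128i *)out, r);
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 10 10 11 11 */
        return 0;
    }
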
vector-zext.ll
10 ; SSE2-NEXT: movdqa %xmm0, %xmm1
13 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],x…
20 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
23 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],…
30 ; SSE41-NEXT: movdqa %xmm0, %xmm1
31 ; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
39 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],…
40 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
46 … vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0
56 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
[all …]
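
The zext lowerings either interleave the low words with a zero register (SSE2 punpcklwd) or use the dedicated widening op (SSE4.1 pmovzxwd). Both, side by side, assuming an SSE4.1-capable build:

    #include <stdio.h>
    #include <smmintrin.h>  /* SSE4.1: _mm_cvtepu16_epi32 */

    int main(void) {
        __m128i v = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
        /* SSE2 style: punpcklwd with zeros widens four u16 to u32 */
        __m128i a = _mm_unpacklo_epi16(v, _mm_setzero_si128());
        /* SSE4.1 style: pmovzxwd does the same widening in one op */
        __m128i b = _mm_cvtepu16_epi32(v);
        int x[4], y[4];
        _mm_storeu_si128((__m128i *)x, a);
        _mm_storeu_si128((__m128i *)y, b);
        printf("%d %d %d %d / %d %d %d %d\n",
               x[0], x[1], x[2], x[3], y[0], y[1], y[2], y[3]);
        return 0;
    }
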
vec_cast2.ll
7 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
8 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
9 ; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
10 ; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
19 …WIDE-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero…
22 ; CHECK-WIDE-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
23 ; CHECK-WIDE-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
24 ; CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
25 ; CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
26 ; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
[all …]
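
The vpslld $24 / vpsrad $24 pair above is the classic in-register sign extension: push the byte's sign bit up to bit 31, then arithmetic-shift it back down. In intrinsics:

    #include <stdio.h>
    #include <emmintrin.h>  /* SSE2 */

    int main(void) {
        /* byte values already sitting in the low byte of each dword */
        __m128i v = _mm_setr_epi32(0x7f, 0x80, 0xff, 0x01);
        __m128i s = _mm_srai_epi32(_mm_slli_epi32(v, 24), 24);
        int out[4];
        _mm_storeu_si128((__m128i *)out, s);
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 127 -128 -1 1 */
        return 0;
    }
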
fast-isel-select-sse.ll
10 ; CHECK: cmpeqss %xmm1, %xmm0
11 ; CHECK-NEXT: andps %xmm0, %xmm2
12 ; CHECK-NEXT: andnps %xmm3, %xmm0
13 ; CHECK-NEXT: orps %xmm2, %xmm0
15 ; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
16 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
24 ; CHECK: cmpeqsd %xmm1, %xmm0
25 ; CHECK-NEXT: andpd %xmm0, %xmm2
26 ; CHECK-NEXT: andnpd %xmm3, %xmm0
27 ; CHECK-NEXT: orpd %xmm2, %xmm0
[all …]
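
Without SSE4.1's blendv, a select on a compare mask lowers to the cmp/and/andn/or quartet in the CHECK lines; AVX folds it into one vblendvps. The SSE pattern written out (a minimal sketch, lane 0 only, matching cmpeqss):

    #include <stdio.h>
    #include <xmmintrin.h>  /* SSE */

    static float select_eq(float a, float b, float t, float f) {
        __m128 m = _mm_cmpeq_ss(_mm_set_ss(a), _mm_set_ss(b)); /* all-ones or zero */
        __m128 r = _mm_or_ps(_mm_and_ps(m, _mm_set_ss(t)),     /* t where mask set */
                             _mm_andnot_ps(m, _mm_set_ss(f))); /* f where clear */
        return _mm_cvtss_f32(r);
    }

    int main(void) {
        printf("%f %f\n", select_eq(1.0f, 1.0f, 2.0f, 3.0f),   /* 2.0 */
                          select_eq(1.0f, 4.0f, 2.0f, 3.0f));  /* 3.0 */
        return 0;
    }
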
vector-shuffle-128-v16.ll
14 ; FIXME-NEXT: punpcklbw %xmm0, %xmm0
15 ; FIXME-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
16 ; FIXME-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
21 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
22 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
23 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
24 ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
30 ; SSSE3-NEXT: pshufb %xmm1, %xmm0
36 ; SSE41-NEXT: pshufb %xmm1, %xmm0
42 ; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
[all …]
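
From SSSE3 on, any fixed byte shuffle — here a broadcast of byte 0 — is one pshufb with a constant control vector, which is why the SSSE3/SSE41/AVX1 lowerings above collapse to a single instruction. Sketch:

    #include <stdio.h>
    #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

    int main(void) {
        unsigned char src[16], out[16];
        for (int i = 0; i < 16; i++) src[i] = 'A' + i;
        __m128i v = _mm_loadu_si128((const __m128i *)src);
        /* an all-zero control: every output byte reads source byte 0 */
        _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(v, _mm_setzero_si128()));
        printf("%.16s\n", (char *)out);  /* AAAAAAAAAAAAAAAA */
        return 0;
    }
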
vector-shuffle-combining.ll
57 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
62 ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
76 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
81 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
95 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
100 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
132 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
137 ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
149 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
154 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
[all …]
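
pshuflw and pshufhw each permute only one 64-bit half of the eight words, leaving the other half intact — hence the paired pshuflw/pshufhw lowerings above. The low-half reversal in isolation:

    #include <stdio.h>
    #include <emmintrin.h>  /* SSE2: _mm_shufflelo_epi16 */

    int main(void) {
        __m128i v = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
        /* pshuflw xmm0 = xmm0[3,2,1,0,4,5,6,7] */
        __m128i r = _mm_shufflelo_epi16(v, _MM_SHUFFLE(0, 1, 2, 3));
        short out[8];
        _mm_storeu_si128((__m128i *)out, r);
        for (int i = 0; i < 8; i++) printf("%d ", out[i]);  /* 3 2 1 0 4 5 6 7 */
        printf("\n");
        return 0;
    }
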
vector-shuffle-128-v4.ll
14 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
19 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
27 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
32 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
40 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
45 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
53 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,0,0]
58 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,0,0]
66 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
71 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
[all …]
vector-sext.ll
13 ; SSE2-NEXT: movdqa %xmm0, %xmm1
15 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
16 ; SSE2-NEXT: pslld $16, %xmm0
17 ; SSE2-NEXT: psrad $16, %xmm0
25 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
27 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
28 ; SSSE3-NEXT: pslld $16, %xmm0
29 ; SSSE3-NEXT: psrad $16, %xmm0
37 ; SSE41-NEXT: movdqa %xmm0, %xmm1
38 ; SSE41-NEXT: pmovzxwd %xmm1, %xmm0
[all …]
bswap-vector.ll
17 ; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
21 …cklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1…
22 ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
23 ; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
24 ; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
29 ; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
34 ; CHECK-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
39 ; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
50 ; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
54 …cklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1…
[all …]
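
With SSSE3 the whole vector bswap is one pshufb using the [1,0,3,2,...] mask in the CHECK-SSSE3 line; the NOSSSE3 path falls back to the unpack/pshuflw/pshufhw/packuswb sequence above. The pshufb form for 16-bit lanes:

    #include <stdio.h>
    #include <tmmintrin.h>  /* SSSE3 */

    int main(void) {
        __m128i v = _mm_setr_epi16(0x0102, 0x0304, 0x0506, 0x0708,
                                   0x090a, 0x0b0c, 0x0d0e, 0x0f10);
        /* swap the two bytes of every word */
        __m128i m = _mm_setr_epi8(1, 0, 3, 2, 5, 4, 7, 6,
                                  9, 8, 11, 10, 13, 12, 15, 14);
        unsigned short out[8];
        _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(v, m));
        printf("%04x %04x\n", out[0], out[7]);  /* 0201 100f */
        return 0;
    }
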
vector-trunc.ll
10 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
11 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
17 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
18 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
24 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
25 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
31 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
32 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
46 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
47 ; SSE-NEXT: movd %xmm0, %rax
[all …]
vec_setcc.ll
10 ; SSE2: pmaxub %xmm0, %xmm1
11 ; SSE2: pcmpeqb %xmm1, %xmm0
14 ; SSE41: pmaxub %xmm0, %xmm1
15 ; SSE41: pcmpeqb %xmm1, %xmm0
18 ; AVX: vpmaxub %xmm1, %xmm0, %xmm1
19 ; AVX: vpcmpeqb %xmm1, %xmm0, %xmm0
27 ; SSE2: pminub %xmm0, %xmm1
28 ; SSE2: pcmpeqb %xmm1, %xmm0
31 ; SSE41: pminub %xmm0, %xmm1
32 ; SSE41: pcmpeqb %xmm1, %xmm0
[all …]
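
SSE2 has no unsigned byte compares, so these setcc tests lower to pmaxub (or pminub) plus pcmpeqb: a >= b unsigned exactly when max(a, b) == a. Sketch:

    #include <stdio.h>
    #include <emmintrin.h>  /* SSE2 */

    /* per-lane unsigned a >= b: 0xff where true */
    static __m128i cmpge_epu8(__m128i a, __m128i b) {
        return _mm_cmpeq_epi8(_mm_max_epu8(a, b), a);
    }

    int main(void) {
        __m128i a = _mm_setr_epi8(0, 10, (char)200, 5, 0,0,0,0,0,0,0,0,0,0,0,0);
        __m128i b = _mm_setr_epi8(0, 20, (char)100, 5, 0,0,0,0,0,0,0,0,0,0,0,0);
        unsigned char out[16];
        _mm_storeu_si128((__m128i *)out, cmpge_epu8(a, b));
        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);  /* ff 00 ff ff */
        return 0;
    }
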
vector-shuffle-128-v2.ll
14 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
19 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
24 ; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
32 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
37 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
45 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
50 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
58 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
63 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
68 ; AVX2-NEXT: vpbroadcastq %xmm1, %xmm0
[all …]
avx-shift.ll
7 ; CHECK-NEXT: vpslld $2, %xmm0, %xmm1
8 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
9 ; CHECK-NEXT: vpslld $2, %xmm0, %xmm0
10 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
20 ; CHECK-NEXT: vpsllw $2, %xmm0, %xmm1
21 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
22 ; CHECK-NEXT: vpsllw $2, %xmm0, %xmm0
23 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
32 ; CHECK-NEXT: vpsllq $2, %xmm0, %xmm1
33 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
[all …]
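
AVX1 provides 256-bit floating-point ops but only 128-bit integer ops, so each 256-bit integer shift above is split into two 128-bit shifts plus extract/insert. The same split spelled out with intrinsics (assumes an AVX1-only build, -mavx):

    #include <stdio.h>
    #include <immintrin.h>  /* AVX */

    /* shift eight i32 lanes left by 2 using only 128-bit integer ops */
    static __m256i slli8x32_avx1(__m256i v) {
        __m128i lo = _mm_slli_epi32(_mm256_castsi256_si128(v), 2);
        __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), 2);
        return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
    }

    int main(void) {
        int in[8] = {1, 2, 3, 4, 5, 6, 7, 8}, out[8];
        _mm256_storeu_si256((__m256i *)out,
                            slli8x32_avx1(_mm256_loadu_si256((const __m256i *)in)));
        for (int i = 0; i < 8; i++) printf("%d ", out[i]);  /* 4 8 ... 32 */
        printf("\n");
        return 0;
    }
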
vector-shuffle-256-v16.ll
9 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
10 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
15 ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
24 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
25 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
26 ; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,4,4]
27 ; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,4]
28 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
33 ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm1
34 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
[all …]
vector-blend.ll
12 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
13 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
18 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
19 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
24 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
29 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
39 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
40 ; SSE2-NEXT: movaps %xmm1, %xmm0
45 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
46 ; SSSE3-NEXT: movaps %xmm1, %xmm0
[all …]
/external/boringssl/src/crypto/aes/asm/
vpaes-x86_64.pl
71 ## AES-encrypt %xmm0.
74 ## %xmm0 = input
78 ## Output in %xmm0
91 pandn %xmm0, %xmm1
94 pand %xmm9, %xmm0
95 pshufb %xmm0, %xmm2
96 movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
97 pshufb %xmm1, %xmm0
100 pxor %xmm2, %xmm0
108 movdqa %xmm12, %xmm0 # 0 : sb1t
[all …]
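
The vpaes prologue splits each byte into low and high nibbles (pand/pandn against the 0x0F mask) and uses each nibble as a pshufb index into a 16-entry table. The nibble-split-and-lookup pattern in intrinsics (the tables below are placeholders, not the real .Lk_ipt constants):

    #include <stdio.h>
    #include <tmmintrin.h>  /* SSSE3 */

    int main(void) {
        __m128i x = _mm_setr_epi8(0x00, 0x1f, (char)0xa5, 0x42,
                                  0,0,0,0,0,0,0,0,0,0,0,0);
        __m128i m0f = _mm_set1_epi8(0x0f);
        __m128i lo = _mm_and_si128(x, m0f);                     /* low nibbles  */
        __m128i hi = _mm_and_si128(_mm_srli_epi16(x, 4), m0f);  /* high nibbles */
        /* placeholder tables: tlo[n] = n, thi[n] = n << 4 */
        __m128i tlo = _mm_setr_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);
        __m128i thi = _mm_setr_epi8(0,16,32,48,64,80,96,112,(char)128,(char)144,
                                    (char)160,(char)176,(char)192,(char)208,
                                    (char)224,(char)240);
        __m128i r = _mm_xor_si128(_mm_shuffle_epi8(tlo, lo),
                                  _mm_shuffle_epi8(thi, hi));
        unsigned char out[16];
        _mm_storeu_si128((__m128i *)out, r);
        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);  /* identity */
        return 0;
    }
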
/external/libvpx/libvpx/third_party/libyuv/source/
scale_win.cc
108 movdqa xmm0, [eax] in ScaleRowDown2_SSE2()
111 psrlw xmm0, 8 // isolate odd pixels. in ScaleRowDown2_SSE2()
113 packuswb xmm0, xmm1 in ScaleRowDown2_SSE2()
115 movdqa [edx], xmm0 in ScaleRowDown2_SSE2()
138 movdqa xmm0, [eax] in ScaleRowDown2Linear_SSE2()
142 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) in ScaleRowDown2Linear_SSE2()
143 psrlw xmm0, 8 in ScaleRowDown2Linear_SSE2()
148 pavgw xmm0, xmm2 in ScaleRowDown2Linear_SSE2()
150 packuswb xmm0, xmm1 in ScaleRowDown2Linear_SSE2()
153 movdqa [edx], xmm0 in ScaleRowDown2Linear_SSE2()
[all …]
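
ScaleRowDown2Linear averages each horizontal pixel pair: isolate even and odd bytes into word lanes, pavgw them, then packuswb back to bytes. The core, roughly, in intrinsics:

    #include <stdio.h>
    #include <emmintrin.h>  /* SSE2 */

    int main(void) {
        unsigned char src[16], dst[8];
        for (int i = 0; i < 16; i++) src[i] = (unsigned char)(i * 10);
        __m128i v = _mm_loadu_si128((const __m128i *)src);
        __m128i even = _mm_and_si128(v, _mm_set1_epi16(0x00ff)); /* bytes 0,2,4,... */
        __m128i odd  = _mm_srli_epi16(v, 8);                     /* bytes 1,3,5,... */
        __m128i avg  = _mm_avg_epu16(even, odd);   /* rounded pair averages */
        _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(avg, avg));
        for (int i = 0; i < 8; i++) printf("%d ", dst[i]);  /* 5 25 45 ... 145 */
        printf("\n");
        return 0;
    }
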
row_win.cc
82 __m128i xmm0, xmm1, xmm2, xmm3; in I422ToARGBRow_SSSE3() local
88 xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf); in I422ToARGBRow_SSSE3()
90 xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); in I422ToARGBRow_SSSE3()
91 xmm0 = _mm_unpacklo_epi16(xmm0, xmm0); in I422ToARGBRow_SSSE3()
92 xmm1 = _mm_load_si128(&xmm0); in I422ToARGBRow_SSSE3()
93 xmm2 = _mm_load_si128(&xmm0); in I422ToARGBRow_SSSE3()
94 xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)kUVToB); in I422ToARGBRow_SSSE3()
97 xmm0 = _mm_sub_epi16(xmm0, *(__m128i*)kUVBiasB); in I422ToARGBRow_SSSE3()
104 xmm0 = _mm_adds_epi16(xmm0, xmm3); in I422ToARGBRow_SSSE3()
107 xmm0 = _mm_srai_epi16(xmm0, 6); in I422ToARGBRow_SSSE3()
[all …]
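
The heart of I422ToARGBRow is pmaddubsw: unsigned pixel bytes times signed fixed-point coefficients, with adjacent products summed into 16-bit lanes — one step of the YUV-to-RGB matrix. The building block in isolation (coefficients illustrative, not libyuv's kUVToB):

    #include <stdio.h>
    #include <tmmintrin.h>  /* SSSE3: _mm_maddubs_epi16 */

    int main(void) {
        /* unsigned data bytes interleaved as u0,v0,u1,v1,... */
        __m128i uv = _mm_setr_epi8(100, (char)200, 50, 60,
                                   0,0,0,0,0,0,0,0,0,0,0,0);
        /* signed coefficient pairs: out = u*cu + v*cv, saturated to i16 */
        __m128i k  = _mm_setr_epi8(2, -1, 2, -1, 0,0,0,0,0,0,0,0,0,0,0,0);
        short out[8];
        _mm_storeu_si128((__m128i *)out, _mm_maddubs_epi16(uv, k));
        printf("%d %d\n", out[0], out[1]);  /* 100*2-200 = 0, 50*2-60 = 40 */
        return 0;
    }
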
/external/libyuv/files/source/
row_win.cc
136 movq xmm0, qword ptr [eax] in I400ToARGBRow_SSE2()
138 punpcklbw xmm0, xmm0 in I400ToARGBRow_SSE2()
139 movdqa xmm1, xmm0 in I400ToARGBRow_SSE2()
140 punpcklwd xmm0, xmm0 in I400ToARGBRow_SSE2()
142 por xmm0, xmm5 in I400ToARGBRow_SSE2()
144 movdqa [edx], xmm0 in I400ToARGBRow_SSE2()
164 movdqa xmm0, [eax] in BGRAToARGBRow_SSSE3()
165 pshufb xmm0, xmm5 in BGRAToARGBRow_SSSE3()
167 movdqa [eax + edx], xmm0 in BGRAToARGBRow_SSSE3()
185 movdqa xmm0, [eax] in ABGRToARGBRow_SSSE3()
[all …]
/external/boringssl/mac-x86_64/crypto/modes/
ghash-x86_64.S
686 movdqa %xmm2,%xmm0
688 movdqa %xmm0,%xmm1
689 pshufd $78,%xmm0,%xmm3
690 pxor %xmm0,%xmm3
694 pxor %xmm0,%xmm3
701 pxor %xmm4,%xmm0
703 movdqa %xmm0,%xmm4
704 movdqa %xmm0,%xmm3
705 psllq $5,%xmm0
706 pxor %xmm0,%xmm3
[all …]
/external/boringssl/linux-x86_64/crypto/modes/
ghash-x86_64.S
687 movdqa %xmm2,%xmm0
689 movdqa %xmm0,%xmm1
690 pshufd $78,%xmm0,%xmm3
691 pxor %xmm0,%xmm3
695 pxor %xmm0,%xmm3
702 pxor %xmm4,%xmm0
704 movdqa %xmm0,%xmm4
705 movdqa %xmm0,%xmm3
706 psllq $5,%xmm0
707 pxor %xmm0,%xmm3
[all …]
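
Both ghash-x86_64.S copies compute GF(2^128) products for GHASH: pclmulqdq performs the 64x64 carry-less multiplies, and the pshufd $78/pxor prologue prepares the Karatsuba halves before the psllq/psrlq reduction shifts. The primitive by itself (assumes a PCLMUL-capable build, -mpclmul):

    #include <stdio.h>
    #include <wmmintrin.h>  /* PCLMUL: _mm_clmulepi64_si128 */

    int main(void) {
        /* carry-less multiply of the two low 64-bit halves (imm8 0x00) */
        __m128i a = _mm_set_epi64x(0, 0x87);  /* x^7 + x^2 + x + 1 */
        __m128i b = _mm_set_epi64x(0, 0x02);  /* x */
        unsigned long long out[2];
        _mm_storeu_si128((__m128i *)out, _mm_clmulepi64_si128(a, b, 0x00));
        printf("%llx %llx\n", out[1], out[0]);  /* 0 10e */
        return 0;
    }
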
