/external/llvm/test/MC/X86/

D | x86_64-fma4-encoding.s
      6  vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
     10  vfmaddss %xmm1, (%rcx),%xmm0, %xmm0
     14  vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
     18  vfmaddsd (%rcx), %xmm1, %xmm0, %xmm0
     22  vfmaddsd %xmm1, (%rcx),%xmm0, %xmm0
     26  vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
     30  vfmaddsd %xmm10, %xmm1, %xmm0, %xmm0
     34  vfmaddps (%rcx), %xmm1, %xmm0, %xmm0
     38  vfmaddps %xmm1, (%rcx),%xmm0, %xmm0
     42  vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
         [all …]
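The FMA4 forms above take four operands and compute a fused multiply-add, dst = a * b + c, with a single rounding. A minimal scalar C sketch of that operation (the helper name fmadd_scalar is mine, not part of the test):

    #include <math.h>

    /* Scalar model of what a 4-operand form such as vfmaddsd encodes:
     * dst = a * b + c, computed with one rounding (fused). */
    static double fmadd_scalar(double a, double b, double c) {
        return fma(a, b, c);  /* C99 fused multiply-add */
    }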
D | shuffle-comments.s
      3  palignr $8, %xmm0, %xmm1
      4  # CHECK: xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
      8  palignr $16, %xmm0, %xmm1
     13  palignr $0, %xmm0, %xmm1
     14  # CHECK: xmm1 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
     18  vpalignr $8, %xmm0, %xmm1, %xmm2
     19  # CHECK: xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
     23  vpalignr $16, %xmm0, %xmm1, %xmm2
     28  vpalignr $0, %xmm0, %xmm1, %xmm2
     29  # CHECK: xmm2 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
         [all …]
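The CHECK comments above spell out palignr's byte selection: destination and source are concatenated, shifted right by the immediate number of bytes, and the low 16 bytes are kept. A minimal C intrinsics sketch of the $8 case (palignr8 is an illustrative helper, not from the test):

    #include <tmmintrin.h>  /* SSSE3: _mm_alignr_epi8 */

    /* palignr $8, %xmm0, %xmm1: concatenate xmm1:xmm0, shift right 8 bytes,
     * keep the low 128 bits -> xmm0[8..15] followed by xmm1[0..7]. */
    static __m128i palignr8(__m128i xmm1, __m128i xmm0) {
        return _mm_alignr_epi8(xmm1, xmm0, 8);
    }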
/external/valgrind/none/tests/amd64/

D | pcmpxstrx64w.stdout.exp
      3  istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550002 flags 00000881
      4  istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
      5  istrm $0x4B: xmm0 00000000000000000000ffffffffffff rcx 5555555555555555 flags 00000881
      6  istrm $0x0B: xmm0 00000000000000000000000000000007 rcx 5555555555555555 flags 00000881
      7  estri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000008c1
      8  estri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
      9  estrm $0x4B: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
     10  estrm $0x0B: xmm0 000000000000000000000000000000ff rcx 5555555555555555 flags 000008c1
     13  istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
     14  istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550003 flags 000000c1
         [all …]
D | pcmpxstrx64.stdout.exp
      3  istri $0x4A: xmm0 55555555555555555555555555555555 rcx 5555555555550006 flags 00000881
      4  istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
      5  istrm $0x4A: xmm0 000000000000000000ffffffffffffff rcx 5555555555555555 flags 00000881
      6  istrm $0x0A: xmm0 0000000000000000000000000000007f rcx 5555555555555555 flags 00000881
      7  estri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000008c1
      8  estri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
      9  estrm $0x4A: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
     10  estrm $0x0A: xmm0 0000000000000000000000000000ffff rcx 5555555555555555 flags 000008c1
     13  istri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000000c1
     14  istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
         [all …]
/external/llvm/test/CodeGen/X86/

D | vector-tzcnt-128.ll
     17  ; SSE2-NEXT: movd %xmm0, %rax
     22  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
     23  ; SSE2-NEXT: movd %xmm0, %rax
     26  ; SSE2-NEXT: movd %rax, %xmm0
     27  ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
     28  ; SSE2-NEXT: movdqa %xmm1, %xmm0
     33  ; SSE3-NEXT: movd %xmm0, %rax
     38  ; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
     39  ; SSE3-NEXT: movd %xmm0, %rax
     42  ; SSE3-NEXT: movd %rax, %xmm0
         [all …]
D | vector-zext.ll
     13  …cklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1…
     19  …cklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1…
     24  …XT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,x…
     29  …T: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,x…
     41  ; SSE2-NEXT: movdqa %xmm0, %xmm1
     43  …cklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2…
     49  ; SSSE3-NEXT: movdqa %xmm0, %xmm1
     51  …cklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2…
     57  ; SSE41-NEXT: movdqa %xmm0, %xmm1
     59  ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[…
         [all …]
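The matches above show the two standard ways these tests zero-extend packed bytes: interleaving with a zeroed register (punpcklbw) on SSE2, and pmovzxbw on SSE4.1/AVX. A minimal C intrinsics sketch of both (helper names are mine):

    #include <emmintrin.h>  /* SSE2 */
    #include <smmintrin.h>  /* SSE4.1 */

    /* Zero-extend the low 8 bytes of v to 8 x u16. */
    static __m128i zext8to16_sse2(__m128i v) {
        return _mm_unpacklo_epi8(v, _mm_setzero_si128()); /* punpcklbw with zeros */
    }
    static __m128i zext8to16_sse41(__m128i v) {
        return _mm_cvtepu8_epi16(v);                      /* pmovzxbw */
    }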
D | widen_conv-4.ll
     14  ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
     17  ; X86-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[…
     18  ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
     19  ; X86-SSE2-NEXT: movups %xmm0, (%eax)
     21  ; X86-SSE2-NEXT: movaps %xmm2, %xmm0
     22  ; X86-SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
     23  ; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
     32  ; X86-SSE42-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
     33  ; X86-SSE42-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0…
     34  ; X86-SSE42-NEXT: cvtdq2ps %xmm0, %xmm0
         [all …]
D | vec_int_to_fp.ll
     17  ; SSE-NEXT: movd %xmm0, %rax
     19  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
     20  ; SSE-NEXT: movd %xmm0, %rax
     21  ; SSE-NEXT: xorps %xmm0, %xmm0
     22  ; SSE-NEXT: cvtsi2sdq %rax, %xmm0
     23  ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
     24  ; SSE-NEXT: movapd %xmm1, %xmm0
     29  ; AVX-NEXT: vpextrq $1, %xmm0, %rax
     30  ; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1
     31  ; AVX-NEXT: vmovq %xmm0, %rax
         [all …]
D | vector-shuffle-128-v8.ll
     14  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
     19  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
     27  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
     32  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
     40  ; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
     45  ; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
     46  ; SSSE3-NEXT: movdqa %xmm1, %xmm0
     51  ; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
     52  ; SSE41-NEXT: movdqa %xmm1, %xmm0
     57  ; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
         [all …]
D | vector-shift-ashr-128.ll
     27  ; SSE2-NEXT: movdqa %xmm0, %xmm2
     29  ; SSE2-NEXT: psrlq %xmm1, %xmm0
     30  ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
     33  ; SSE2-NEXT: movdqa %xmm2, %xmm0
     44  ; SSE41-NEXT: movdqa %xmm0, %xmm3
     46  ; SSE41-NEXT: psrlq %xmm4, %xmm0
     47  ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
     48  ; SSE41-NEXT: pxor %xmm2, %xmm0
     49  ; SSE41-NEXT: psubq %xmm2, %xmm0
     59  ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
         [all …]
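SSE has no 64-bit arithmetic right shift, so the sequences above appear to build it from a logical shift plus the pxor/psubq sign fix-up. A scalar C sketch of that identity, assuming a shift count of 0-63 (ashr64 is an illustrative helper, not code from the test):

    #include <stdint.h>

    /* ((x >> s) ^ m) - m, where m = 0x8000000000000000 >> s, re-extends
     * the sign bit that the logical shift discarded. */
    static int64_t ashr64(int64_t x, unsigned s) {
        uint64_t m = UINT64_C(0x8000000000000000) >> s;
        return (int64_t)((((uint64_t)x >> s) ^ m) - m);
    }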
D | fast-isel-select-sse.ll
     10  ; CHECK: cmpeqss %xmm1, %xmm0
     11  ; CHECK-NEXT: andps %xmm0, %xmm2
     12  ; CHECK-NEXT: andnps %xmm3, %xmm0
     13  ; CHECK-NEXT: orps %xmm2, %xmm0
     15  ; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
     16  ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
     24  ; CHECK: cmpeqsd %xmm1, %xmm0
     25  ; CHECK-NEXT: andpd %xmm0, %xmm2
     26  ; CHECK-NEXT: andnpd %xmm3, %xmm0
     27  ; CHECK-NEXT: orpd %xmm2, %xmm0
         [all …]
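The CHECK lines above are the classic branchless SSE select: the compare produces an all-ones or all-zeros mask, then (mask & a) | (~mask & b) picks one of the two values; AVX folds the same idea into vblendvps. A minimal C intrinsics sketch (select_eq is an illustrative name):

    #include <xmmintrin.h>  /* SSE */

    /* Select a where x == y (low lane), otherwise b. */
    static __m128 select_eq(__m128 x, __m128 y, __m128 a, __m128 b) {
        __m128 mask = _mm_cmpeq_ss(x, y);          /* cmpeqss */
        return _mm_or_ps(_mm_and_ps(mask, a),      /* andps   */
                         _mm_andnot_ps(mask, b));  /* andnps  */
    }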
D | vector-shuffle-128-v4.ll
     15  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
     20  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
     28  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
     33  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
     41  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
     46  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
     54  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,0,0]
     59  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,0,0]
     67  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
     72  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
         [all …]
D | vector-popcnt-128.ll
     12  ; SSE2-NEXT: movdqa %xmm0, %xmm1
     15  ; SSE2-NEXT: psubq %xmm1, %xmm0
     17  ; SSE2-NEXT: movdqa %xmm0, %xmm2
     19  ; SSE2-NEXT: psrlq $2, %xmm0
     20  ; SSE2-NEXT: pand %xmm1, %xmm0
     21  ; SSE2-NEXT: paddq %xmm2, %xmm0
     22  ; SSE2-NEXT: movdqa %xmm0, %xmm1
     24  ; SSE2-NEXT: paddq %xmm0, %xmm1
     26  ; SSE2-NEXT: pxor %xmm0, %xmm0
     27  ; SSE2-NEXT: psadbw %xmm0, %xmm1
         [all …]
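The SSE2 sequence above looks like the standard bit-parallel popcount: accumulate 2-bit, then 4-bit, then byte-wide partial sums, and finally add the bytes (psadbw against zero in the vector code). A scalar C model (popcount64 is my name for it):

    #include <stdint.h>

    static uint64_t popcount64(uint64_t x) {
        x = x - ((x >> 1) & UINT64_C(0x5555555555555555));        /* 2-bit sums */
        x = (x & UINT64_C(0x3333333333333333)) +
            ((x >> 2) & UINT64_C(0x3333333333333333));            /* 4-bit sums */
        x = (x + (x >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);        /* byte sums  */
        return (x * UINT64_C(0x0101010101010101)) >> 56;          /* add bytes  */
    }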
D | vector-shuffle-128-v16.ll
     14  ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
     15  ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
     16  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
     22  ; SSSE3-NEXT: pshufb %xmm1, %xmm0
     28  ; SSE41-NEXT: pshufb %xmm1, %xmm0
     34  ; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
     39  ; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
     48  ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
     49  ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
     50  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
         [all …]
D | vector-lzcnt-128.ll
     17  ; SSE2-NEXT: movd %xmm0, %rax
     23  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
     24  ; SSE2-NEXT: movd %xmm0, %rax
     28  ; SSE2-NEXT: movd %rax, %xmm0
     29  ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
     30  ; SSE2-NEXT: movdqa %xmm1, %xmm0
     35  ; SSE3-NEXT: movd %xmm0, %rax
     41  ; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
     42  ; SSE3-NEXT: movd %xmm0, %rax
     46  ; SSE3-NEXT: movd %rax, %xmm0
         [all …]
D | widen_conv-3.ll
     13  ; X86-SSE2-NEXT: psllq $48, %xmm0
     14  ; X86-SSE2-NEXT: psrad $16, %xmm0
     15  ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
     16  ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
     17  ; X86-SSE2-NEXT: movss %xmm0, (%eax)
     18  ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
     19  ; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
     25  ; X86-SSE42-NEXT: psllq $48, %xmm0
     26  ; X86-SSE42-NEXT: psrad $16, %xmm0
     27  ; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
         [all …]
D | vec_cast2.ll
      8  ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
      9  ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
     10  ; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
     11  ; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
     20  ; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm1
     21  ; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
     22  ; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm0
     23  ; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
     33  ; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
     34  ; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
         [all …]
D | vec_cmp_uint-128.ll
     19  ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
     20  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
     21  ; SSE2-NEXT: pand %xmm1, %xmm0
     26  ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
     31  ; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
     36  ; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
     41  ; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
     51  ; SSE-NEXT: pcmpeqd %xmm1, %xmm0
     56  ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
     61  ; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
         [all …]
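SSE2 has no 64-bit compare, so the first three lines above synthesize pcmpeqq from 32-bit pieces: compare dwords, swap the two dwords inside each qword, then AND, so a qword lane is all-ones only if both of its halves matched. A C intrinsics sketch (cmpeq_epi64_sse2 is an illustrative helper):

    #include <emmintrin.h>  /* SSE2 */

    static __m128i cmpeq_epi64_sse2(__m128i a, __m128i b) {
        __m128i eq32 = _mm_cmpeq_epi32(a, b);                              /* pcmpeqd */
        __m128i swap = _mm_shuffle_epi32(eq32, _MM_SHUFFLE(2, 3, 0, 1));   /* pshufd [1,0,3,2] */
        return _mm_and_si128(eq32, swap);                                  /* pand */
    }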
D | vector-shuffle-combining.ll
     58  ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
     63  ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
     77  ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
     82  ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
     96  ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
    101  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
    106  ; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
    138  ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
    143  ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
    155  ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
         [all …]
D | vector-shift-shl-128.ll
     21  ; SSE2-NEXT: movdqa %xmm0, %xmm2
     23  ; SSE2-NEXT: psllq %xmm1, %xmm0
     24  ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
     25  ; SSE2-NEXT: movapd %xmm2, %xmm0
     30  ; SSE41-NEXT: movdqa %xmm0, %xmm2
     33  ; SSE41-NEXT: psllq %xmm1, %xmm0
     34  ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
     39  ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm2
     41  ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
     42  ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
         [all …]
D | vector-shift-lshr-128.ll
     21  ; SSE2-NEXT: movdqa %xmm0, %xmm2
     23  ; SSE2-NEXT: psrlq %xmm1, %xmm0
     24  ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
     25  ; SSE2-NEXT: movapd %xmm2, %xmm0
     30  ; SSE41-NEXT: movdqa %xmm0, %xmm2
     33  ; SSE41-NEXT: psrlq %xmm1, %xmm0
     34  ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
     39  ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
     41  ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
     42  ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
         [all …]
D | vector-trunc.ll
     13  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
     14  ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
     23  ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
     24  ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
     33  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
     34  ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
     48  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
     49  ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
     76  ; SSE2-NEXT: pextrw $4, %xmm0, %ecx
     77  ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],x…
         [all …]
D | vec_cmp_sint-128.ll
     19  ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
     20  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
     21  ; SSE2-NEXT: pand %xmm1, %xmm0
     26  ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
     31  ; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
     36  ; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
     41  ; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
     51  ; SSE-NEXT: pcmpeqd %xmm1, %xmm0
     56  ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
     61  ; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
         [all …]
D | promote-vec3.ll
     12  ; SSE3-NEXT: pxor %xmm0, %xmm0
     19  ; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],x…
     30  ; SSE41-NEXT: pxor %xmm0, %xmm0
     31  ; SSE41-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
     32  ; SSE41-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
     33  ; SSE41-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
     34  ; SSE41-NEXT: movd %xmm0, %eax
     35  ; SSE41-NEXT: pextrw $2, %xmm0, %edx
     36  ; SSE41-NEXT: pextrw $4, %xmm0, %ecx
     44  ; AVX_ANY-NEXT: vpxor %xmm0, %xmm0, %xmm0
         [all …]
/external/boringssl/src/crypto/aes/asm/

D | vpaes-x86_64.pl
     72  ## AES-encrypt %xmm0.
     75  ## %xmm0 = input
     79  ## Output in %xmm0
     92  pandn %xmm0, %xmm1
     95  pand %xmm9, %xmm0
     96  pshufb %xmm0, %xmm2
     97  movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
     98  pshufb %xmm1, %xmm0
    101  pxor %xmm2, %xmm0
    109  movdqa %xmm12, %xmm0 # 0 : sb1t
         [all …]
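The vpaes fragment above hints at its core technique: each input byte is split into low and high nibbles (the pand/pandn pair), each nibble indexes a 16-entry table via pshufb, and the two lookups are combined with pxor. A rough C intrinsics sketch of that general pattern only; the tables and the helper name are placeholders, not the real .Lk_ipt constants or code from vpaes-x86_64.pl:

    #include <emmintrin.h>
    #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

    /* Look each byte's low and high nibble up in two 16-entry tables
     * and XOR the results, pshufb-style. */
    static __m128i nibble_lookup(__m128i x, __m128i tbl_lo, __m128i tbl_hi) {
        const __m128i lo_mask = _mm_set1_epi8(0x0F);
        __m128i lo = _mm_and_si128(x, lo_mask);                     /* pand  */
        __m128i hi = _mm_and_si128(_mm_srli_epi16(x, 4), lo_mask);  /* isolate high nibbles */
        return _mm_xor_si128(_mm_shuffle_epi8(tbl_lo, lo),          /* pshufb, lo table */
                             _mm_shuffle_epi8(tbl_hi, hi));         /* pshufb, hi table; pxor */
    }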