/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
vector-trunc-packus.ll
    27  ; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
    29  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
    31  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
    40  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
    42  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
    44  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
    53  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
    55  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
    57  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    64  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    [all …]

vector-trunc-ssat.ll
    27  ; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
    29  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
    31  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
    40  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
    42  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
    44  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
    55  ; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
    57  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
    59  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
    67  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
    [all …]

vec_compare.ll
    48  ; CHECK: pshufd $177
    59  ; CHECK: pshufd $177
    80  ; CHECK: pshufd $160
    82  ; CHECK: pshufd $245
    84  ; CHECK: pshufd $245
    97  ; CHECK: pshufd $160
    99  ; CHECK: pshufd $245
   101  ; CHECK: pshufd $245
   114  ; CHECK: pshufd $160
   116  ; CHECK: pshufd $245
    [all …]

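A note on notation: the "{{.*#+}} xmmD = xmmS[a,b,c,d]" annotations in the FileCheck lines and the raw "pshufd $imm" forms in vec_compare.ll encode the same thing. Destination dword lane i takes source lane (imm >> 2*i) & 3. A minimal decoder for the immediates that recur throughout this listing (a hypothetical helper, not taken from any of the files above):

#include <stdio.h>

/* Decode a pshufd imm8 into the "dst = src[a,b,c,d]" selector notation
 * used above: destination dword lane i takes source lane (imm >> 2*i) & 3. */
static void decode_pshufd(unsigned imm)
{
    printf("$%-3u (0x%02X) = src[%u,%u,%u,%u]\n", imm, imm,
           imm & 3, (imm >> 2) & 3, (imm >> 4) & 3, (imm >> 6) & 3);
}

int main(void)
{
    decode_pshufd(177);  /* 0xB1 -> [1,0,3,2]: swap dwords within each qword  */
    decode_pshufd(160);  /* 0xA0 -> [0,0,2,2]: splat low dword of each qword  */
    decode_pshufd(245);  /* 0xF5 -> [1,1,3,3]: splat high dword of each qword */
    decode_pshufd(0xD8); /*      -> [0,2,1,3]: interleave-style reorder       */
    return 0;
}
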
vector-trunc-usat.ll
    26  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
    28  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    30  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
    35  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
    37  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
    39  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    58  ; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
    60  ; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    62  ; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
    67  ; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
    [all …]

combine-rotates.ll
    12  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    14  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
    15  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    17  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
    19  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    20  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
   104  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
   106  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
   107  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
   109  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
    [all …]

vector-reduce-umin.ll
    16  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
    23  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    25  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
    27  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    38  ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
    45  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    47  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    49  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
    95  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
    97  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    [all …]

vector-reduce-smax.ll
    16  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
    23  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    25  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
    27  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    38  ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
    45  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    47  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    49  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
    92  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
    94  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    [all …]

vector-reduce-smin.ll
    16  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
    23  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    25  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
    27  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    38  ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
    45  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    47  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    49  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
    92  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
    94  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    [all …]

vector-rem.ll
     7  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
     9  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
    14  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
    16  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
    27  ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
    29  ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
    45  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
    47  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
    52  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
    54  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
    [all …]

vec_shift6.ll
    46  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
    48  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    50  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   120  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
   122  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   123  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
   125  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
   128  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
   129  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
   131  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    [all …]

vector-reduce-umax.ll
    16  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
    23  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    25  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
    27  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    38  ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
    45  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
    47  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    49  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
    95  ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
    97  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
    [all …]

combine-shl.ll
    78  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    80  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    81  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    83  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
   112  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
   114  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   115  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
   117  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
   174  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
   176  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    [all …]

vec_compare-sse4.ll
    14  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    16  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
    18  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
    29  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    31  ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
    33  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
    50  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]

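The [0,0,2,2] / [1,1,3,3] pairs that dominate vec_compare-sse4.ll and the vector-trunc-* / vector-reduce-* entries above are the SSE2 substitute for a 64-bit compare (pcmpgtq only arrives with SSE4.2): compare dwords, splat each qword's low-dword and high-dword results, and combine. A sketch of the idea with a hypothetical helper name; the real lowering additionally biases the operands so the low-dword comparison behaves unsigned, which is elided here:

#include <emmintrin.h>

/* SSE2 emulation of a per-qword "greater than", the pattern behind the
 * [0,0,2,2]/[1,1,3,3] shuffles above. Assumes operands were pre-biased so
 * the low-dword compare can be treated as unsigned. */
static __m128i cmpgt_qwords(__m128i a, __m128i b)
{
    __m128i gt    = _mm_cmpgt_epi32(a, b);       /* per-dword a > b  */
    __m128i eq    = _mm_cmpeq_epi32(a, b);       /* per-dword a == b */
    __m128i lo_gt = _mm_shuffle_epi32(gt, 0xA0); /* [0,0,2,2]        */
    __m128i hi_eq = _mm_shuffle_epi32(eq, 0xF5); /* [1,1,3,3]        */
    __m128i hi_gt = _mm_shuffle_epi32(gt, 0xF5); /* [1,1,3,3]        */
    /* qword result: high dwords greater, or equal with low dwords greater */
    return _mm_or_si128(hi_gt, _mm_and_si128(hi_eq, lo_gt));
}

The lone [1,0,3,2] match on line 50 is the companion equality idiom: pcmpeqd followed by pshufd $177 and pand, so a qword compares equal only if both of its dwords do.
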
i64-to-float.ll
    12  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    25  ; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    43  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    56  ; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   154  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
   156  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
   158  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
   167  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
   169  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
   171  ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
    [all …]

mulvi32.ll
    42  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    43  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    45  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
    50  ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    51  ; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    74  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    76  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    77  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    79  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    99  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    [all …]

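mulvi32.ll, combine-shl.ll above, and combine-multiplies.ll / vector-idiv.ll below all show the other staple pshufd pattern: SSE2 has no 32-bit lane multiply (pmulld is SSE4.1), so even and odd lanes are multiplied separately with pmuludq and the [1,1,3,3] / [0,2,2,3] shuffles collect the low dwords of the 64-bit products. A sketch assuming SSE2 intrinsics; the helper name is made up:

#include <emmintrin.h>

/* 32-bit lane multiply on plain SSE2 via two pmuludq, mirroring the
 * [1,1,3,3]/[0,2,2,3] shuffle pairs in the matches above. */
static __m128i mullo_epi32_sse2(__m128i a, __m128i b)
{
    __m128i even = _mm_mul_epu32(a, b);        /* lanes 0,2 -> 64-bit products */
    __m128i ao   = _mm_shuffle_epi32(a, 0xF5); /* [1,1,3,3]: odd lanes down    */
    __m128i bo   = _mm_shuffle_epi32(b, 0xF5);
    __m128i odd  = _mm_mul_epu32(ao, bo);      /* lanes 1,3 -> 64-bit products */
    even = _mm_shuffle_epi32(even, 0xE8);      /* [0,2,2,3]: keep low dwords   */
    odd  = _mm_shuffle_epi32(odd, 0xE8);
    return _mm_unpacklo_epi32(even, odd);      /* interleave back to 4 lanes   */
}
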
combine-multiplies.ll
    81  ; pshufd $245, %xmm0, %xmm3 # xmm3 = xmm0[1,1,3,3]
    83  ; pshufd $232, %xmm0, %xmm0 # xmm0 = xmm0[0,2,2,3]
    85  ; pshufd $232, %xmm3, %xmm2 # xmm2 = xmm3[0,2,2,3]
   111  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
   113  ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   115  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
   145  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
   147  ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   148  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
   150  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]

/external/llvm/test/CodeGen/X86/
vec_compare.ll
    48  ; CHECK: pshufd $177
    59  ; CHECK: pshufd $177
    80  ; CHECK: pshufd $160
    82  ; CHECK: pshufd $245
    84  ; CHECK: pshufd $245
    97  ; CHECK: pshufd $160
    99  ; CHECK: pshufd $245
   101  ; CHECK: pshufd $245
   114  ; CHECK: pshufd $160
   116  ; CHECK: pshufd $245
    [all …]

lower-bitcast.ll
    13  ; pshufd+paddq+pshufd. This is fixed with the widening legalization.
    17  ; CHECK: pshufd
    19  ; CHECK-NEXT: pshufd
    53  ; CHECK-NOT: pshufd
    55  ; CHECK-NOT: pshufd
    59  ; CHECK-WIDE-NOT: pshufd
    61  ; CHECK-WIDE-NOT: pshufd
    71  ; FIXME: At the moment we still produce the sequence pshufd+paddd+pshufd.
    76  ; CHECK: pshufd
    78  ; CHECK-NEXT: pshufd
    [all …]

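The comments preserved in lower-bitcast.ll describe a <2 x i32> add that used to come out as pshufd+paddq+pshufd before widening legalization (with a pshufd+paddd+pshufd variant still flagged FIXME). The trick is sound because the low 32 bits of a 64-bit sum depend only on the low 32 bits of the operands, so the high-dword garbage is simply discarded when narrowing back. An illustrative reconstruction; the masks and function name are assumptions, not necessarily the exact sequence the test checks:

#include <emmintrin.h>

/* A <2 x i32> add done on widened 64-bit lanes: spread the two dwords into
 * separate qwords, paddq them, then shuffle the low dwords back together. */
static __m128i add_v2i32_widened(__m128i a, __m128i b)
{
    __m128i wa = _mm_shuffle_epi32(a, 0xD8); /* [0,2,1,3]: dwords 0,1 -> qword slots */
    __m128i wb = _mm_shuffle_epi32(b, 0xD8);
    __m128i s  = _mm_add_epi64(wa, wb);      /* paddq on the widened lanes */
    return _mm_shuffle_epi32(s, 0xE8);       /* [0,2,2,3]: narrow back     */
}
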
vector-rem.ll
     7  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
     9  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
    14  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
    16  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
    27  ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
    29  ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
    46  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
    48  ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
    53  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
    55  ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
    [all …]

vec_compare-sse4.ll
    14  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    16  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
    18  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
    29  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    31  ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
    33  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
    50  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]

vector-idiv.ll
    34  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    36  ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
    37  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
    39  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
    50  ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    51  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    54  ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]

/external/swiftshader/third_party/subzero/tests_lit/llvm2ice_tests/
randomize-regalloc.ll
    29  ; OPTM1_1-NEXT: pshufd xmm6,XMMWORD PTR [esp+0x20],0x31
    30  ; OPTM1_1-NEXT: pshufd xmm2,XMMWORD PTR [esp+0x10],0x31
    34  ; OPTM1_1-NEXT: pshufd xmm0,xmm0,0xd8
    42  ; CHECK_1-NEXT: pshufd xmm0,xmm0,0x31
    43  ; CHECK_1-NEXT: pshufd xmm5,xmm1,0x31
    47  ; CHECK_1-NEXT: pshufd xmm7,xmm7,0xd8
    56  ; OPTM1_123-NEXT: pshufd xmm6,XMMWORD PTR [esp+0x20],0x31
    57  ; OPTM1_123-NEXT: pshufd xmm2,XMMWORD PTR [esp+0x10],0x31
    61  ; OPTM1_123-NEXT: pshufd xmm0,xmm0,0xd8
    69  ; CHECK_123-NEXT: pshufd xmm0,xmm0,0x31
    [all …]

nop-insertion.ll
    39  ; PROB50: pshufd $49, 32(%esp), %xmm1
    41  ; PROB50: pshufd $49, 16(%esp), %xmm2
    48  ; PROB50: pshufd $216, %xmm0, %xmm0
    65  ; PROB90: pshufd $49, 32(%esp), %xmm1
    67  ; PROB90: pshufd $49, 16(%esp), %xmm2
    74  ; PROB90: pshufd $216, %xmm0, %xmm0
    96  ; MAXNOPS2: pshufd $49, 32(%esp), %xmm1
    98  ; MAXNOPS2: pshufd $49, 16(%esp), %xmm2
   105  ; MAXNOPS2: pshufd $216, %xmm0, %xmm0
   124  ; SANDBOX50: pshufd $49, 32(%esp), %xmm1
    [all …]

/external/libvpx/libvpx/vp8/common/x86/
idctllm_sse2.asm
   153  pshufd xmm0, xmm0, 11011000b
   154  pshufd xmm1, xmm4, 11011000b
   160  pshufd xmm2, xmm2, 11011000b
   161  pshufd xmm3, xmm4, 11011000b
   227  pshufd xmm0, xmm2, 11011000b
   228  pshufd xmm2, xmm1, 11011000b
   230  pshufd xmm1, xmm5, 11011000b
   231  pshufd xmm3, xmm7, 11011000b
   305  pshufd xmm0, xmm2, 11011000b
   306  pshufd xmm2, xmm1, 11011000b
    [all …]

/external/boringssl/src/crypto/chacha/asm/
chacha-x86.pl
   463  &pshufd ("xmm0","xmm3",0x00);
   464  &pshufd ("xmm1","xmm3",0x55);
   465  &pshufd ("xmm2","xmm3",0xaa);
   466  &pshufd ("xmm3","xmm3",0xff);
   468  &pshufd ("xmm4","xmm7",0x00);
   469  &pshufd ("xmm5","xmm7",0x55);
   471  &pshufd ("xmm6","xmm7",0xaa);
   472  &pshufd ("xmm7","xmm7",0xff);
   485  &pshufd ("xmm0","xmm3",0x00);
   486  &pshufd ("xmm1","xmm3",0x55);
    [all …]

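The chacha-x86.pl matches all use the four broadcast immediates: 0x00, 0x55, 0xAA and 0xFF decode to [0,0,0,0], [1,1,1,1], [2,2,2,2] and [3,3,3,3], splatting each 32-bit word of a state row into its own register so several ChaCha blocks can be processed lane-parallel. A sketch of that splat step in intrinsics (helper name hypothetical):

#include <emmintrin.h>

/* Broadcast each dword of one state row to a full register, as the
 * pshufd 0x00/0x55/0xAA/0xFF sequence in chacha-x86.pl does. */
static void splat_row(__m128i row, __m128i out[4])
{
    out[0] = _mm_shuffle_epi32(row, 0x00); /* [0,0,0,0] */
    out[1] = _mm_shuffle_epi32(row, 0x55); /* [1,1,1,1] */
    out[2] = _mm_shuffle_epi32(row, 0xAA); /* [2,2,2,2] */
    out[3] = _mm_shuffle_epi32(row, 0xFF); /* [3,3,3,3] */
}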