/external/boringssl/src/crypto/hrss/asm/

poly_rq_mul.S
    335  vmovdqa const3(%rip), %ymm3
    348  vmovdqa %ymm0, 0(%rax)
    349  vmovdqa %ymm1, 96(%rax)
    351  vmovdqa %ymm14, 192(%rax)
    352  vmovdqa %ymm2, 288(%rax)
    353  vmovdqa %ymm12, 384(%rax)
    355  vmovdqa %ymm14, 480(%rax)
    357  vmovdqa %ymm14, 576(%rax)
    359  vmovdqa %ymm15, 672(%rax)
    361  vmovdqa %ymm14, 768(%rax)
    [all …]
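Every hit in this listing is the AVX vmovdqa instruction (move aligned packed integer values): it copies a full xmm (16-byte) or ymm (32-byte) register to or from memory and raises #GP if a memory operand is not aligned to the operand size, which is why files like poly_rq_mul.S above stage data in aligned buffers. A minimal sketch in the same GNU-as AT&T syntax; the copy32 routine and its System V argument registers are illustrative, not taken from any file listed here:

        .text
        .globl  copy32
copy32:                                 # copy32(dst=%rdi, src=%rsi): copy one 32-byte block
        vmovdqa (%rsi), %ymm0           # aligned 32-byte load; faults unless src is 32-byte aligned
        vmovdqa %ymm0, (%rdi)           # aligned 32-byte store; faults unless dst is 32-byte aligned
        vzeroupper                      # clear upper ymm state before returning to SSE-era code
        ret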
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

shuffle-strided-with-offset-512.ll
    10  ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
    11  ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
    16  ; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
    22  ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
    23  ; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
    26  ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,5,7]
    28  ; AVX512VL-NEXT: vmovdqa %ymm2, (%rsi)
    40  ; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
    50  ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,5,7]
    52  ; AVX512BWVL-NEXT: vmovdqa %ymm2, (%rsi)
    [all …]
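The .ll entries in this section are FileCheck-based codegen tests: each ; PREFIX-NEXT: line asserts the next instruction llc emits for that CPU configuration, and {{.*#+}} is a FileCheck regex (of the kind llvm/utils/update_llc_test_checks.py generates) that absorbs the constant-pool operand and the # comment marker. So a check line such as line 26 above matches emitted assembly of roughly this shape, where the .LCPI label name is illustrative:

        vmovdqa .LCPI0_0(%rip), %ymm2   # ymm2 = [0,2,5,7]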
shuffle-strided-with-offset-256.ll
    13  ; AVX1-NEXT: vmovdqa (%rdi), %ymm0
    15  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
    19  ; AVX1-NEXT: vmovdqa %xmm0, (%rsi)
    25  ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
    27  ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
    31  ; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
    37  ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
    39  ; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
    43  ; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
    55  ; AVX1-NEXT: vmovdqa (%rdi), %ymm0
    [all …]
shuffle-vs-trunc-512.ll
    16  ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
    17  ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
    22  ; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
    28  ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
    29  ; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
    32  ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,5,7]
    34  ; AVX512VL-NEXT: vmovdqa %ymm2, (%rsi)
    46  ; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
    56  ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,5,7]
    58  ; AVX512BWVL-NEXT: vmovdqa %ymm2, (%rsi)
    [all …]
shuffle-vs-trunc-256.ll
    18  ; AVX1-NEXT: vmovdqa (%rdi), %ymm0
    20  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
    24  ; AVX1-NEXT: vmovdqa %xmm0, (%rsi)
    30  ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
    32  ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
    36  ; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
    42  ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
    44  ; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
    48  ; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
    60  ; AVX1-NEXT: vmovdqa (%rdi), %ymm0
    [all …]
shuffle-vs-trunc-128.ll
    35  ; AVX-NEXT: vmovdqa (%rdi), %xmm0
    42  ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
    49  ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
    56  ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
    63  ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
    90  ; AVX-NEXT: vmovdqa (%rdi), %xmm0
    97  ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
    104  ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
    111  ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
    118  ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
    [all …]
vector-popcnt-256.ll
    13  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
    15  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
    35  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,1…
    37  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,…
    61  ; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15…
    63  ; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,…
    75  ; BITALG-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15…
    77  ; BITALG-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,…
    94  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
    96  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
    [all …]
vector-tzcnt-256.ll
    23  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
    25  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
    52  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,1…
    54  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,…
    70  ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,1…
    72  ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,…
    88  ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,…
    90  ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,…
    127  ; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15…
    129  ; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,…
    [all …]
vector-lzcnt-256.ll
    16  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
    18  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
    70  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,1…
    72  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,…
    100  ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,…
    102  ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,…
    130  ; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15…
    132  ; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,…
    172  ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,1…
    174  ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0…
    [all …]
shuffle-strided-with-offset-128.ll
    42  ; AVX-NEXT: vmovdqa (%rdi), %xmm0
    49  ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
    56  ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
    63  ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
    98  ; AVX-NEXT: vmovdqa (%rdi), %xmm0
    105  ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
    118  ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
    202  ; AVX-NEXT: vmovdqa (%rdi), %xmm0
    209  ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
    216  ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
    [all …]
/external/boringssl/linux-x86_64/crypto/cipher_extra/

aes128gcmsiv-x86_64.S
    81  vmovdqa (%rsi),%xmm0
    82  vmovdqa %xmm0,%xmm1
    83  vmovdqa %xmm0,(%rdi)
    85  vmovdqa %xmm0,16(%rdi)
    87  vmovdqa %xmm0,32(%rdi)
    89  vmovdqa %xmm0,48(%rdi)
    91  vmovdqa %xmm0,64(%rdi)
    93  vmovdqa %xmm0,80(%rdi)
    95  vmovdqa %xmm0,96(%rdi)
    97  vmovdqa %xmm0,112(%rdi)
    [all …]
chacha20_poly1305_x86_64.S
    4115  vmovdqa .chacha20_consts(%rip),%ymm0
    4125  vmovdqa %ymm4,64(%rbp)
    4126  vmovdqa %ymm8,96(%rbp)
    4127  vmovdqa %ymm12,160(%rbp)
    4179  vmovdqa %ymm3,0(%rbp)
    4246  vmovdqa .chacha20_consts(%rip),%ymm0
    4247  vmovdqa 64(%rbp),%ymm4
    4248  vmovdqa 96(%rbp),%ymm8
    4249  vmovdqa %ymm0,%ymm1
    4250  vmovdqa %ymm4,%ymm5
    [all …]
/external/boringssl/mac-x86_64/crypto/cipher_extra/

aes128gcmsiv-x86_64.S
    81  vmovdqa (%rsi),%xmm0
    82  vmovdqa %xmm0,%xmm1
    83  vmovdqa %xmm0,(%rdi)
    85  vmovdqa %xmm0,16(%rdi)
    87  vmovdqa %xmm0,32(%rdi)
    89  vmovdqa %xmm0,48(%rdi)
    91  vmovdqa %xmm0,64(%rdi)
    93  vmovdqa %xmm0,80(%rdi)
    95  vmovdqa %xmm0,96(%rdi)
    97  vmovdqa %xmm0,112(%rdi)
    [all …]
chacha20_poly1305_x86_64.S
    4114  vmovdqa .chacha20_consts(%rip),%ymm0
    4124  vmovdqa %ymm4,64(%rbp)
    4125  vmovdqa %ymm8,96(%rbp)
    4126  vmovdqa %ymm12,160(%rbp)
    4178  vmovdqa %ymm3,0(%rbp)
    4245  vmovdqa .chacha20_consts(%rip),%ymm0
    4246  vmovdqa 64(%rbp),%ymm4
    4247  vmovdqa 96(%rbp),%ymm8
    4248  vmovdqa %ymm0,%ymm1
    4249  vmovdqa %ymm4,%ymm5
    [all …]
/external/boringssl/win-x86_64/crypto/cipher_extra/

aes128gcmsiv-x86_64.asm
    88  vmovdqa xmm0,XMMWORD[rsi]
    89  vmovdqa xmm1,xmm0
    90  vmovdqa XMMWORD[rdi],xmm0
    92  vmovdqa XMMWORD[16+rdi],xmm0
    94  vmovdqa XMMWORD[32+rdi],xmm0
    96  vmovdqa XMMWORD[48+rdi],xmm0
    98  vmovdqa XMMWORD[64+rdi],xmm0
    100  vmovdqa XMMWORD[80+rdi],xmm0
    102  vmovdqa XMMWORD[96+rdi],xmm0
    104  vmovdqa XMMWORD[112+rdi],xmm0
    [all …]
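The win-x86_64 trees carry the same BoringSSL sources assembled with NASM, so the hits above are the Intel-syntax spellings of the AT&T lines in the linux/mac copies: destination operand first, and memory operands written with an explicit size keyword and a bracketed address. One corresponding pair, shown side by side for comparison rather than as a single assembleable file (same instruction, same encoding):

        vmovdqa %xmm0,16(%rdi)          # AT&T form (linux/mac .S files)
        vmovdqa XMMWORD[16+rdi],xmm0    ; NASM form (win .asm files)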
/external/libjpeg-turbo/simd/x86_64/

jccolext-avx2.asm
    135  vmovdqa ymmF, ymmA
    141  vmovdqa ymmB, ymmA
    169  vmovdqa ymmG, ymmA
    185  vmovdqa ymmD, ymmA
    201  vmovdqa ymmE, ymmA
    219  vmovdqa ymmC, ymmA
    223  vmovdqa ymmB, ymmE
    227  vmovdqa ymmF, ymmD
    249  vmovdqa xmmF, xmmA
    257  vmovdqa ymmF, ymmA
    [all …]
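In the libjpeg-turbo AVX2 color-conversion sources, names like ymmA through ymmG are NASM aliases for the architectural ymm registers, and the register-to-register hits above are plain full-register copies taken before destructive unpack/shuffle steps; the reg-reg form of vmovdqa has no alignment constraint. A hedged sketch, with the alias-to-register mapping invented for illustration:

%define ymmA ymm0               ; alias names as used in the sources (mapping illustrative)
%define ymmF ymm7
        vmovdqa ymmF, ymmA      ; keep a copy of ymmA before a later step clobbers it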
jcgryext-avx2.asm
    127  vmovdqa ymmF, ymmA
    133  vmovdqa ymmB, ymmA
    161  vmovdqa ymmG, ymmA
    177  vmovdqa ymmD, ymmA
    193  vmovdqa ymmE, ymmA
    211  vmovdqa ymmC, ymmA
    215  vmovdqa ymmB, ymmE
    219  vmovdqa ymmF, ymmD
    241  vmovdqa xmmF, xmmA
    249  vmovdqa ymmF, ymmA
    [all …]
/external/libjpeg-turbo/simd/i386/

jccolext-avx2.asm
    149  vmovdqa ymmF, ymmA
    155  vmovdqa ymmB, ymmA
    184  vmovdqa ymmG, ymmA
    200  vmovdqa ymmD, ymmA
    216  vmovdqa ymmE, ymmA
    234  vmovdqa ymmC, ymmA
    238  vmovdqa ymmB, ymmE
    242  vmovdqa ymmF, ymmD
    264  vmovdqa xmmF, xmmA
    272  vmovdqa ymmF, ymmA
    [all …]
jcgryext-avx2.asm
    141  vmovdqa ymmF, ymmA
    147  vmovdqa ymmB, ymmA
    176  vmovdqa ymmG, ymmA
    192  vmovdqa ymmD, ymmA
    208  vmovdqa ymmE, ymmA
    226  vmovdqa ymmC, ymmA
    230  vmovdqa ymmB, ymmE
    234  vmovdqa ymmF, ymmD
    256  vmovdqa xmmF, xmmA
    264  vmovdqa ymmF, ymmA
    [all …]
/external/boringssl/linux-x86_64/crypto/chacha/

chacha-x86_64.S
    1057  vmovdqa %ymm8,128-256(%rcx)
    1059  vmovdqa %ymm9,160-256(%rcx)
    1061  vmovdqa %ymm10,192-256(%rcx)
    1062  vmovdqa %ymm11,224-256(%rcx)
    1066  vmovdqa %ymm0,256-256(%rcx)
    1068  vmovdqa %ymm1,288-256(%rcx)
    1070  vmovdqa %ymm2,320-256(%rcx)
    1071  vmovdqa %ymm3,352-256(%rcx)
    1075  vmovdqa %ymm12,384-512(%rax)
    1077  vmovdqa %ymm13,416-512(%rax)
    [all …]
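The chacha-x86_64.S hits write displacements as constant expressions such as 128-256(%rcx). The assembler folds these at assembly time, so the arithmetic only documents the frame layout (an absolute offset minus the bias already added to the base register) and costs nothing at run time. The two stores below assemble to identical bytes (illustrative, not lines from the file):

        vmovdqa %ymm8, 128-256(%rcx)    # displacement folds to -128 at assembly time
        vmovdqa %ymm8, -128(%rcx)       # identical instruction encoding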
/external/boringssl/mac-x86_64/crypto/chacha/

chacha-x86_64.S
    1050  vmovdqa %ymm8,128-256(%rcx)
    1052  vmovdqa %ymm9,160-256(%rcx)
    1054  vmovdqa %ymm10,192-256(%rcx)
    1055  vmovdqa %ymm11,224-256(%rcx)
    1059  vmovdqa %ymm0,256-256(%rcx)
    1061  vmovdqa %ymm1,288-256(%rcx)
    1063  vmovdqa %ymm2,320-256(%rcx)
    1064  vmovdqa %ymm3,352-256(%rcx)
    1068  vmovdqa %ymm12,384-512(%rax)
    1070  vmovdqa %ymm13,416-512(%rax)
    [all …]
/external/llvm/test/CodeGen/X86/

vector-tzcnt-256.ll
    17  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
    19  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
    21  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
    46  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,1…
    48  ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,…
    81  ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,…
    83  ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,…
    105  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
    107  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
    109  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
    [all …]
avx-cvt-2.ll
    15  ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    19  ; CHECK-NEXT: vmovdqa %xmm0, (%rdi)
    32  ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    36  ; CHECK-NEXT: vmovdqa %xmm0, (%rdi)
    49  ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    67  ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
/external/boringssl/win-x86_64/crypto/chacha/

chacha-x86_64.asm
    1139  vmovdqa YMMWORD[(128-256)+rcx],ymm8
    1141  vmovdqa YMMWORD[(160-256)+rcx],ymm9
    1143  vmovdqa YMMWORD[(192-256)+rcx],ymm10
    1144  vmovdqa YMMWORD[(224-256)+rcx],ymm11
    1148  vmovdqa YMMWORD[(256-256)+rcx],ymm0
    1150  vmovdqa YMMWORD[(288-256)+rcx],ymm1
    1152  vmovdqa YMMWORD[(320-256)+rcx],ymm2
    1153  vmovdqa YMMWORD[(352-256)+rcx],ymm3
    1157  vmovdqa YMMWORD[(384-512)+rax],ymm12
    1159  vmovdqa YMMWORD[(416-512)+rax],ymm13
    [all …]
/external/boringssl/src/crypto/fipsmodule/sha/asm/

sha1-586.pl
    1122  &vmovdqa(@X[3],&QWP(0,$tmp1));  # K_00_19
    1123  &vmovdqa(@X[4],&QWP(16,$tmp1)); # K_20_39
    1124  &vmovdqa(@X[5],&QWP(32,$tmp1)); # K_40_59
    1125  &vmovdqa(@X[6],&QWP(48,$tmp1)); # K_60_79
    1126  &vmovdqa(@X[2],&QWP(64,$tmp1)); # pbswap mask
    1157  &vmovdqa(&QWP(112+0,"esp"),@X[4]);  # copy constants
    1158  &vmovdqa(&QWP(112+16,"esp"),@X[5]);
    1159  &vmovdqa(&QWP(112+32,"esp"),@X[6]);
    1161  &vmovdqa(&QWP(112+48,"esp"),@X[3]);
    1163  &vmovdqa(&QWP(112+64,"esp"),@X[2]);
    [all …]
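sha1-586.pl is an OpenSSL-style perlasm source rather than finished assembly: each &vmovdqa(dst, src) call is a Perl helper that prints one vmovdqa instruction in the output flavor (AT&T, NASM, or MASM) selected at generation time, with &QWP(disp, base) building the memory operand. Under that assumption, a call like line 1122 above would emit, in AT&T flavor, an instruction of roughly this form (the concrete register choices are illustrative):

        vmovdqa (%ebp), %xmm3           # @X[3] resolved to %xmm3, $tmp1 to %ebp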