/external/boringssl/linux-x86/crypto/fipsmodule/

  sha512-586.S
     48  movq 40(%esi),%mm5
     60  movq %mm5,40(%esp)
     83  pxor %mm6,%mm5
     86  pand %mm4,%mm5
     92  pxor %mm6,%mm5
     97  paddq %mm5,%mm7
    107  movq %mm0,%mm5
    108  psrlq $28,%mm5
    111  movq %mm5,%mm7
    114  psrlq $6,%mm5
    [all …]

/external/boringssl/mac-x86/crypto/fipsmodule/

  sha512-586.S
     47  movq 40(%esi),%mm5
     59  movq %mm5,40(%esp)
     82  pxor %mm6,%mm5
     85  pand %mm4,%mm5
     91  pxor %mm6,%mm5
     96  paddq %mm5,%mm7
    106  movq %mm0,%mm5
    107  psrlq $28,%mm5
    110  movq %mm5,%mm7
    113  psrlq $6,%mm5
    [all …]

/external/boringssl/win-x86/crypto/fipsmodule/

  sha512-586.asm
     61  movq mm5,[40+esi]
     73  movq [40+esp],mm5
     96  pxor mm5,mm6
     99  pand mm5,mm4
    105  pxor mm5,mm6
    110  paddq mm7,mm5
    120  movq mm5,mm0
    121  psrlq mm5,28
    124  movq mm7,mm5
    127  psrlq mm5,6
    [all …]

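These three files are the Linux/ELF, macOS, and Windows/NASM builds of the same OpenSSL-derived 32-bit SHA-512 assembly; with no 64-bit general registers available, the hash state lives in MMX registers and the round functions are built from psrlq/psllq/pxor/pand chains like the ones matched above. For orientation only, a plain-C sketch of the standard SHA-512 round functions those chains compute (rotr64, Sigma0, Sigma1, and Ch are our names for illustration, not the module's code):

    #include <stdint.h>

    /* 64-bit rotate built from shifts, as the MMX code does with psrlq/psllq/pxor */
    static inline uint64_t rotr64(uint64_t x, unsigned n) {
        return (x >> n) | (x << (64 - n));
    }
    static inline uint64_t Sigma0(uint64_t a) {   /* big sigma 0 */
        return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
    }
    static inline uint64_t Sigma1(uint64_t e) {   /* big sigma 1 */
        return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
    }
    static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g) {
        /* same algebra as the pxor/pand/pxor sequence in the matches above */
        return g ^ (e & (f ^ g));
    }
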
/external/libjpeg-turbo/simd/loongson/

  jdcolext-mmi.c
     43  #define mmB mm5
     57  #define mmD mm5
     71  #define mmF mm5
     85  #define mmH mm5
     98  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7; in jsimd_ycc_rgb_convert_mmi() local
    111  mm5 = _mm_load_si64((__m64 *)inptr1); in jsimd_ycc_rgb_convert_mmi()
    122  mm4 = _mm_and_si64(mm4, mm5); /* mm4=Cb(0246)=CbE */ in jsimd_ycc_rgb_convert_mmi()
    123  mm5 = _mm_srli_pi16(mm5, BYTE_BIT); /* mm5=Cb(1357)=CbO */ in jsimd_ycc_rgb_convert_mmi()
    127  mm5 = _mm_add_pi16(mm5, mm7); in jsimd_ycc_rgb_convert_mmi()
    143  mm3 = mm5; /* mm3 = CbO */ in jsimd_ycc_rgb_convert_mmi()
    [all …]

  jccolext-mmi.c
     44  #define mmB mm5
     58  #define mmD mm5
     72  #define mmF mm5
     86  #define mmH mm5
     99  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7; in jsimd_rgb_ycc_convert_mmi() local
    330  wk[3] = mm5; in jsimd_rgb_ycc_convert_mmi()
    345  mm1 = _mm_loadlo_pi16_f(mm5); in jsimd_rgb_ycc_convert_mmi()
    346  mm6 = _mm_loadhi_pi16_f(mm5); in jsimd_rgb_ycc_convert_mmi()
    350  mm5 = PD_ONEHALFM1_CJ; in jsimd_rgb_ycc_convert_mmi()
    353  mm7 = _mm_add_pi32(mm7, mm5); in jsimd_rgb_ycc_convert_mmi()
    [all …]

  jdsample-mmi.c
     61  mm5 = mm7; \
     63  mm5 = _mm_srli_si64(mm5, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* mm5=( 3 - - -) */ \
     67  mm5 = _mm_or_si64(mm5, mm6); /* mm5=( 3 4 5 6) */ \
     84  mm5 = _mm_add_pi16(mm5, PW_EIGHT); \
     89  mm5 = _mm_add_pi16(mm5, mm3); \
     91  mm5 = _mm_srli_pi16(mm5, 4); /* mm5=OutrHE=( 8 10 12 14) */ \
    100  mm5 = _mm_or_si64(mm5, mm2); /* mm5=OutrH=( 8 9 10 11 12 13 14 15) */ \
    103  _mm_store_si64((__m64 *)outptr##r + 1, mm5); \
    114  __m64 mm0, mm1, mm2, mm3 = 0.0, mm4, mm5, mm6, mm7 = 0.0; in jsimd_h2v2_fancy_upsample_mmi() local
    155  mm5 = mm1; in jsimd_h2v2_fancy_upsample_mmi()
    [all …]

  jquanti-mmi.c
     59  mm5 = mm1; \
     68  mm1 = _mm_add_pi16(mm1, mm5); /* (MSB=1), so we always need to add the */ \
     79  mm5 = mm1; \
     89  mm7 = _mm_and_si64(mm7, mm5); \
     94  mm5 = _mm_srai_pi16(mm5, (WORD_BIT - 1)); /* negative input */ \
     97  mm5 = _mm_and_si64(mm5, scale1); \
     99  mm1 = _mm_add_pi16(mm1, mm5); \
    119  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7; in jsimd_quantize_mmi() local

  jcsample-mmi.c
     45  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6 = 0.0, mm7; in jsimd_h2v2_downsample_mmi() local
     71  mm5 = mm1;
     75  mm5 = _mm_srli_pi16(mm5, BYTE_BIT);
     77  mm1 = _mm_add_pi16(mm1, mm5);
     80  mm5 = mm3;
     84  mm5 = _mm_srli_pi16(mm5, BYTE_BIT);
     86  mm3 = _mm_add_pi16(mm3, mm5);

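The Loongson MMI files above are libjpeg-turbo's C translations of its MMX SIMD code, written with __m64 intrinsics supplied by the project's loongson-mmintrin.h. The recurring mm4/mm5 and/shift pair is the usual even/odd byte de-interleave of packed chroma samples (the CbE/CbO comments in the matches). A minimal sketch of that step using the equivalent x86 <mmintrin.h> intrinsics; split_even_odd and its arguments are invented for illustration:

    #include <mmintrin.h>

    /* Split 8 packed bytes into even-indexed and odd-indexed samples, each
       widened to four 16-bit lanes (the CbE/CbO pattern in jdcolext-mmi.c). */
    static void split_even_odd(__m64 pixels, __m64 *even, __m64 *odd) {
        __m64 lo_byte_mask = _mm_set1_pi16(0x00FF);
        *even = _mm_and_si64(pixels, lo_byte_mask);  /* bytes 0, 2, 4, 6 */
        *odd  = _mm_srli_pi16(pixels, 8);            /* bytes 1, 3, 5, 7 */
        /* on x86, callers must issue _mm_empty() before using x87 floating point */
    }
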
/external/libvpx/libvpx/vp8/common/x86/

  idctllm_mmx.asm
     71  movq mm5, mm1
     74  pmulhw mm5, [GLOBAL(x_s1sqr2)];
     75  paddw mm5, mm1 ; ip1 * sin(pi/8) * sqrt(2)
     81  psubw mm7, mm5 ; c1
     83  movq mm5, mm1
     86  pmulhw mm5, [GLOBAL(x_c1sqr2less1)]
     87  paddw mm5, mm1
     92  paddw mm3, mm5 ; d1
    113  movq mm5, mm2 ; 13 03 12 02
    119  punpckhdq mm5, mm4 ; 33 23 13 03
    [all …]

  subpixel_mmx.asm
     60  movq mm5, mm4 ; mm5 = p-2..p5
     61  punpckhbw mm4, mm0 ; mm5 = p2..p5
     62  pmullw mm4, mm7 ; mm5 *= kernel 4 modifiers
     63  paddsw mm3, mm4 ; mm3 += mm5
     65  movq mm4, mm5 ; mm4 = p-2..p5;
     66  psrlq mm5, 16 ; mm5 = p0..p5;
     67  punpcklbw mm5, mm0 ; mm5 = p0..p3
     68  pmullw mm5, mm2 ; mm5 *= kernel 2 modifiers
     69  paddsw mm3, mm5 ; mm3 += mm5
     71  movq mm5, mm4 ; mm5 = p-2..p5
    [all …]

  dequantize_mmx.asm
    101  movq mm5, mm1
    104  pmulhw mm5, [GLOBAL(x_s1sqr2)];
    105  paddw mm5, mm1 ; ip1 * sin(pi/8) * sqrt(2)
    111  psubw mm7, mm5 ; c1
    113  movq mm5, mm1
    116  pmulhw mm5, [GLOBAL(x_c1sqr2less1)]
    117  paddw mm5, mm1
    122  paddw mm3, mm5 ; d1
    143  movq mm5, mm2 ; 13 03 12 02
    149  punpckhdq mm5, mm4 ; 33 23 13 03
    [all …]

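In idctllm_mmx.asm and dequantize_mmx.asm the VP8 inverse transform multiplies coefficients by sin(pi/8)*sqrt(2) and cos(pi/8)*sqrt(2) in Q16 fixed point. pmulhw is a signed 16x16 multiply keeping the high half, and both constants exceed 0x7FFF in Q16, so the tables store constant - 65536 and the code adds the input back afterwards; that is the pmulhw/paddw pairing visible above and the reason for the name x_c1sqr2less1. A scalar sketch of the identity (mulhi_q16 is an illustrative name; it assumes arithmetic right shift on negative values, which is what pmulhw itself provides):

    #include <stdint.h>

    /* result == (v * q16_const) >> 16, computed the way the MMX code does it:
         pmulhw v, (q16_const - 65536)   ; high half of signed 16x16 multiply
         paddw  result, v                ; add the input back                  */
    static int16_t mulhi_q16(int16_t v, int32_t q16_const) {
        int16_t stored = (int16_t)(q16_const - 65536);  /* e.g. VP8's 35468 -> -30068 */
        int32_t high   = ((int32_t)v * stored) >> 16;   /* what pmulhw returns */
        return (int16_t)(high + v);                     /* paddw restores (v*q16_const)>>16 */
    }
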
/external/libjpeg-turbo/simd/i386/

  jdcolext-mmx.asm
    102  movq mm5, MMWORD [ebx] ; mm5=Cb(01234567)
    111  pand mm4, mm5 ; mm4=Cb(0246)=CbE
    112  psrlw mm5, BYTE_BIT ; mm5=Cb(1357)=CbO
    117  paddw mm5, mm7
    132  movq mm3, mm5 ; mm3=CbO
    134  paddw mm5, mm5 ; mm5=2*CbO
    141  pmulhw mm5, [GOTOFF(eax,PW_MF0228)] ; mm5=(2*CbO * -FIX(0.22800))
    146  paddw mm5, [GOTOFF(eax,PW_ONE)]
    148  psraw mm5, 1 ; mm5=(CbO * -FIX(0.22800))
    155  paddw mm5, mm3
    [all …]

  jccolext-mmx.asm
    268  ; mm1=(R1 R3 R5 R7)=RO, mm3=(G1 G3 G5 G7)=GO, mm5=(B1 B3 B5 B7)=BO
    283  movq MMWORD [wk(3)], mm5 ; wk(3)=BO
    300  punpcklwd mm1, mm5 ; mm1=BOL
    301  punpckhwd mm6, mm5 ; mm6=BOH
    305  movq mm5, [GOTOFF(eax,PD_ONEHALFM1_CJ)] ; mm5=[PD_ONEHALFM1_CJ]
    309  paddd mm7, mm5
    310  paddd mm4, mm5
    320  movq mm5, mm0
    324  pmaddwd mm5, [GOTOFF(eax,PW_MF016_MF033)] ; mm5=REL*-FIX(0.168)+GEL*-FIX(0.331)
    339  paddd mm5, mm0
    [all …]

  jquant-3dn.asm
     75  punpcklwd mm5, mm0 ; mm5=(***4***5)
     82  psrad mm5, (DWORD_BIT-BYTE_BIT) ; mm5=(45)
     84  pi2fd mm5, mm5
     89  movq MMWORD [MMBLOCK(0,2,edi,SIZEOF_FAST_FLOAT)], mm5
    178  movq mm5, mm2
    180  punpckhwd mm5, mm3 ; mm5=(05 07 ** **)
    183  punpcklwd mm2, mm5 ; mm2=(04 05 06 07)
    199  movq mm5, mm6
    201  punpckhwd mm5, mm1 ; mm5=(11 13 ** **)
    206  punpcklwd mm6, mm5 ; mm6=(10 11 12 13)

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

  mmx-fold-zero.ll
     13  ; X86-NEXT: movq 16(%ebp), %mm5
     14  ; X86-NEXT: movq %mm5, (%esp) # 8-byte Spill
     16  ; X86-NEXT: paddd %mm5, %mm3
     24  ; X86-NEXT: movq %mm5, %mm1
     26  ; X86-NEXT: movq 32(%ebp), %mm5
     28  ; X86-NEXT: pmuludq %mm5, %mm7
     30  ; X86-NEXT: paddw %mm7, %mm5
     31  ; X86-NEXT: paddw %mm5, %mm2
     39  ; X86-NEXT: paddw %mm5, %mm0
     51  ; X64-NEXT: movdq2q %xmm1, %mm5
    [all …]

  stack-folding-mmx.ll
     15  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},…
     24  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},…
     65  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     74  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     83  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     92  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    101  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    110  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    119  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    128  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    [all …]

  stack-folding-3dnow.ll
      6  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     15  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     24  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     33  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     42  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     51  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     60  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     69  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     78  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     87  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    [all …]

  2008-02-26-AsmDirectMemOp.ll
     15  … $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0…

/external/mesa3d/src/mesa/x86-64/

  xform4.S
    261  movq 8(%rdx), %mm5 /* x3 | x2 */
    264  movq %mm5, %mm6 /* x3 | x2 */
    268  pfmul %mm2, %mm5 /* x3*m32 | x2*m22 */
    271  pfacc %mm7, %mm5 /* x3 | x2*m22+x3*m32 */
    277  movq %mm5, 8(%rdi) /* write r2, r3 */
    327  movq 8(%rdx), %mm5 /* x3 | x2 */
    330  movq %mm5, %mm6 /* x3 | x2 */
    333  punpckldq %mm5, %mm5 /* x2 | x2 */
    335  pfmul %mm2, %mm5 /* x2*m21 | x2*m22 */
    339  pfadd %mm4, %mm5 /* x1*m11+x2*m21 | x0*m00+x2*m20 */
    [all …]

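xform4.S is Mesa's x86-64 3DNow! vertex-transform code; the pfmul/pfacc/pfadd chains above accumulate a 4x4 matrix-vector product two floats at a time, as the m00/m20/m21/m32 terms in the comments suggest. A scalar C sketch of that product, assuming Mesa's usual column-major matrix layout (the function name and the layout assumption are ours, not the file's):

    /* out = mat * in, with mat stored column-major: element (row, col) at mat[col*4 + row]. */
    static void transform_point4(float out[4], const float mat[16], const float in[4]) {
        for (int row = 0; row < 4; row++) {
            out[row] = mat[0 * 4 + row] * in[0]
                     + mat[1 * 4 + row] * in[1]
                     + mat[2 * 4 + row] * in[2]
                     + mat[3 * 4 + row] * in[3];
        }
    }
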
/external/llvm/test/CodeGen/X86/

  stack-folding-mmx.ll
     15  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},…
     24  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},…
     65  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     74  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     83  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     92  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    101  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    110  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    119  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    128  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    [all …]

  stack-folding-3dnow.ll
      6  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     15  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     24  …%1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"…
     33  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     42  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     51  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     60  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     69  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     78  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
     87  %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
    [all …]

  2008-02-26-AsmDirectMemOp.ll
     15  … $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0…

/external/swiftshader/third_party/LLVM/test/CodeGen/X86/

  2008-02-26-AsmDirectMemOp.ll
     15  … $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0…

/external/mesa3d/src/mesa/x86/

  read_rgba_span_x86.S
    253  movq 8(%esp), %mm5
    257  movq %mm5, %mm6
    258  movq %mm5, %mm7
    273  pand %mm1, %mm5
    279  por %mm6, %mm5
    282  movq %mm5, 8(%ecx)
    512  movq (%esp), %mm5
    544  pand %mm5, %mm0
    545  pand %mm5, %mm2
    586  pand %mm5, %mm0
    [all …]

/external/libvpx/libvpx/vpx_dsp/x86/

  sad_sse3.asm
    128  movq mm5, QWORD PTR [%3]
    132  psadbw mm5, mm0
    145  paddw mm5, mm1
    163  paddw mm5, mm1
    272  punpckldq mm5, mm6
    274  movq [rcx], mm5
    297  punpckldq mm5, mm6
    299  movq [rcx], mm5
    325  movd mm5, DWORD PTR [ref_ptr+2]
    333  punpcklbw mm5, mm3
    [all …]

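sad_sse3.asm implements libvpx's block-matching SAD (sum of absolute differences) kernels; the psadbw mm5, mm0 match above reduces eight packed reference/source bytes to their SAD in a single instruction, and the paddw lines accumulate that across rows. A scalar C equivalent of one such 8-byte step, for orientation only (sad8 is not a libvpx function):

    #include <stdint.h>

    /* What one "psadbw mm5, mm0" produces: the sum of |a[i] - b[i]| over 8 bytes. */
    static unsigned sad8(const uint8_t *a, const uint8_t *b) {
        unsigned sum = 0;
        for (int i = 0; i < 8; i++)
            sum += (a[i] > b[i]) ? (unsigned)(a[i] - b[i]) : (unsigned)(b[i] - a[i]);
        return sum;
    }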