/external/mesa3d/src/mesa/x86/ |
D | 3dnow_normal.S |
     88  MOVD      ( ARG_SCALE, MM0 )         /*            | scale       */
     89  PUNPCKLDQ ( MM0, MM0 )               /* scale      | scale       */
     91  PFMUL     ( MM0, MM3 )               /* scale * m1 | scale * m0  */
     92  PFMUL     ( MM0, MM4 )               /* scale * m5 | scale * m4  */
     93  PFMUL     ( MM0, MM5 )               /* scale * m6 | scale * m2  */
     94  PFMUL     ( MM0, MM6 )               /* scale * m9 | scale * m8  */
     95  PFMUL     ( MM0, MM7 )               /*            | scale * m10 */
    100  MOVQ      ( REGIND(EDX), MM0 )       /* x1         | x0          */
    103  MOVQ      ( MM0, MM1 )               /* x1         | x0          */
    106  PFMUL     ( MM3, MM0 )               /* x1*m1      | x0*m0       */
    [all …]
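The comments above trace Mesa's 3DNow! normal transform: lines 88-95 broadcast the scale factor and pre-multiply nine matrix elements two at a time, then lines 100-106 start the per-normal dot products. A scalar C sketch of that arithmetic, assuming Mesa's usual transposed-matrix normal path (all names here are illustrative, not Mesa's):

    /* Hypothetical scalar restatement of 3dnow_normal.S: pre-scale the
     * 3x3 block once, then dot each normal against it. */
    static void transform_normal(const float m[16], float scale,
                                 const float in[3], float out[3])
    {
        const float m0 = m[0] * scale, m1 = m[1] * scale, m2  = m[2]  * scale;
        const float m4 = m[4] * scale, m5 = m[5] * scale, m6  = m[6]  * scale;
        const float m8 = m[8] * scale, m9 = m[9] * scale, m10 = m[10] * scale;

        out[0] = in[0] * m0 + in[1] * m1 + in[2] * m2;
        out[1] = in[0] * m4 + in[1] * m5 + in[2] * m6;
        out[2] = in[0] * m8 + in[1] * m9 + in[2] * m10;
    }

Each PFMUL in the real code computes two of these products at once, which is why the comments show pairs like "scale * m1 | scale * m0".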
|
D | 3dnow_xform4.S |
     69  MOVQ      ( REGIND(EAX), MM0 )       /* x1     | x0     */
     75  MOVQ      ( MM0, MM2 )               /* x1     | x0     */
     78  PUNPCKLDQ ( MM0, MM0 )               /* x0     | x0     */
     81  MOVQ      ( MM0, MM1 )               /* x0     | x0     */
     84  PFMUL     ( REGIND(ECX), MM0 )       /* x0*m1  | x0*m0  */
    100  PFADD     ( MM0, MM2 )
    158  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
    159  PUNPCKLDQ ( REGOFF(20, ECX), MM0 )   /* m11    | m00    */
    180  PFMUL     ( MM0, MM4 )               /* x1*m11 | x0*m00 */
    250  MOVQ      ( MM2, MM0 )               /* x1     | x0     */
    [all …]
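The 3dnow_xform4/3/2/1 files (this entry and the three below) share one pattern: PUNPCKLDQ splats an input component, PFMUL multiplies it against half a matrix column, and PFADD folds the partial products together. A scalar sketch, assuming Mesa's column-major 4x4 layout (the function name is illustrative):

    /* Column-major 4x4 point transform; the assembly performs two of
     * these multiplies per PFMUL. */
    static void transform_point4(const float m[16], const float in[4],
                                 float out[4])
    {
        for (int j = 0; j < 4; ++j)
            out[j] = in[0] * m[j]     + in[1] * m[4 + j]
                   + in[2] * m[8 + j] + in[3] * m[12 + j];
    }

The m00/m11 hits further down (lines 158-180) appear to be the fast path for diagonal, scale-only matrices, where only m00 and m11 need multiplying.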
|
D | 3dnow_xform3.S |
     69  MOVQ      ( REGIND(EAX), MM0 )       /* x1          | x0          */
     75  MOVQ      ( MM0, MM1 )               /* x1          | x0          */
     78  PUNPCKLDQ ( MM0, MM0 )               /* x0          | x0          */
     84  MOVQ      ( MM0, MM3 )               /* x0          | x0          */
     88  PFMUL     ( REGIND(ECX), MM0 )       /* x0*m1       | x0*m0       */
     94  PFADD     ( MM0, MM1 )               /* x0*m1+x1*m5 | x0*m0+x1*m4 */
    150  MOVD      ( REGIND(ECX), MM0 )       /*             | m00         */
    151  PUNPCKLDQ ( REGOFF(20, ECX), MM0 )   /* m11         | m00         */
    172  PFMUL     ( MM0, MM4 )               /* x1*m11      | x0*m00      */
    240  MOVQ      ( REGIND(EAX), MM0 )       /* x1          | x0          */
    [all …]
|
D | 3dnow_xform2.S |
     62  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
     63  PUNPCKLDQ ( REGOFF(16, ECX), MM0 )   /* m10    | m00    */
     83  PFMUL     ( MM0, MM6 )               /* x1*m10 | x0*m00 */
    143  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
    144  PUNPCKLDQ ( REGOFF(20, ECX), MM0 )   /* m11    | m00    */
    152  PFMUL     ( MM0, MM4 )               /* x1*m11 | x0*m00 */
    199  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
    200  PUNPCKLDQ ( REGOFF(16, ECX), MM0 )   /* m10    | m00    */
    217  PFMUL     ( MM0, MM6 )               /* x1*m10 | x0*m00 */
    276  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
    [all …]
|
D | 3dnow_xform1.S |
     62  MOVQ      ( REGIND(ECX), MM0 )       /* m01    | m00    */
     75  PFMUL     ( MM0, MM4 )               /* x0*m01 | x0*m00 */
    130  MOVD      ( REGIND(EAX), MM0 )       /*        | x0     */
    133  MOVD      ( MM0, REGIND(EDX) )       /*        | r0     */
    175  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
    184  PFMUL     ( MM0, MM4 )               /*        | x0*m00 */
    233  MOVD      ( REGIND(ECX), MM0 )       /*        | m00    */
    240  PFMUL     ( MM0, MM4 )               /* 0      | x0*m00 */
    287  MOVQ      ( REGIND(ECX), MM0 )       /* m01    | m00    */
    296  PFMUL     ( MM0, MM4 )               /* x0*m01 | x0*m00 */
    [all …]
|
D | mmx_blend.S |
    271  PXOR ( MM0, MM0 )         /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
    275  GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 )                             ;\
    383  PXOR ( MM0, MM0 )         /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */ ;\
    391  GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 )                             ;\
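mmx_blend.S zeroes MM0 with PXOR so GMB_UNPACK can interleave it with packed 8-bit color channels, widening them to 16 bits before the blend multiply. A minimal intrinsics sketch of that idiom (the function name is hypothetical):

    #include <mmintrin.h>

    /* PXOR + PUNPCKLBW: zero-extend the low four 8-bit channels of a
     * packed pixel to 16 bits each. */
    static __m64 widen_lo(__m64 rgba8)
    {
        __m64 zero = _mm_setzero_si64();       /* PXOR ( MM0, MM0 )   */
        return _mm_unpacklo_pi8(rgba8, zero);  /* u8 -> u16, low half */
    }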
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | 2007-07-03-GR64ToVR64.ll |
    3  ; CHECK: movd %rsi, [[MM0:%mm[0-9]+]]
    5  ; CHECK: paddusw [[MM0]], [[MM1]]
|
/external/llvm/test/CodeGen/X86/ |
D | 2007-07-03-GR64ToVR64.ll |
    3  ; CHECK: movd %rsi, [[MM0:%mm[0-9]+]]
    5  ; CHECK: paddusw [[MM0]], [[MM1]]
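Both copies of this test pin down the GR64-to-VR64 path: a 64-bit integer value must reach an MMX register (the movd into [[MM0]]) before paddusw can use it. Roughly the C that exercises it (a sketch; the .ll test bitcasts i64 to x86_mmx directly):

    #include <mmintrin.h>

    /* Move two 64-bit GPR values into MMX registers, then do a
     * saturating unsigned 16-bit add (paddusw). */
    __m64 add_sat_u16(long long a, long long b)
    {
        return _mm_adds_pu16(_mm_cvtsi64_m64(a), _mm_cvtsi64_m64(b));
    }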
|
D | ipra-reg-usage.ll | 6 …R12 DR13 DR14 DR15 FP0 FP1 FP2 FP3 FP4 FP5 FP6 FP7 K0 K1 K2 K3 K4 K5 K6 K7 MM0 MM1 MM2 MM3 MM4 MM5…
|
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86InstrControl.td |
    142  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
    181  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
    217  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
    249  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
    282  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
|
D | X86CallingConv.td |
     49  // MMX vector types are always returned in MM0. If the target doesn't have
     50  // MM0, it doesn't support these vector types.
     51  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,
    260  CCAssignToReg<[MM0, MM1, MM2]>>>,
|
D | X86GenRegisterInfo.inc |
      84  MM0 = 65,
     313  const unsigned MM0_Overlaps[] = { X86::MM0, 0 };
     630  { "MM0", MM0_Overlaps, Empty_SubRegsSet, Empty_SuperRegsSet },
     920  X86::MM0, X86::MM1, X86::MM2, X86::MM3, X86::MM4, X86::MM5, X86::MM6, X86::MM7,
    1181  RI->mapDwarfRegToLLVMReg(41, X86::MM0, false );
    1241  RI->mapDwarfRegToLLVMReg(29, X86::MM0, false );
    1276  RI->mapDwarfRegToLLVMReg(29, X86::MM0, false );
    1307  RI->mapDwarfRegToLLVMReg(41, X86::MM0, true );
    1367  RI->mapDwarfRegToLLVMReg(29, X86::MM0, true );
    1402  RI->mapDwarfRegToLLVMReg(29, X86::MM0, true );
    [all …]
|
D | X86GenCallingConv.inc |
    190  X86::MM0, X86::MM1, X86::MM2
    878  if (unsigned Reg = State.AllocateReg(X86::MM0)) {
|
D | X86RegisterInfo.td | 147 def MM0 : Register<"mm0">, DwarfRegNum<[41, 29, 29]>;
|
D | X86InstrCompiler.td |
    303  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
    317  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
|
/external/swiftshader/third_party/LLVM/lib/Target/X86/Disassembler/ |
D | X86DisassemblerDecoder.h | 197 ENTRY(MM0) \
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
     71  // MMX vector types are always returned in MM0. If the target doesn't have
     72  // MM0, it doesn't support these vector types.
     73  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,
    581  CCAssignToReg<[MM0, MM1, MM2]>>>,
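Lines 71-73 are the rule the comment describes: any value of type x86mmx is returned in MM0, and a target without MM0 simply cannot return such vectors. In C terms, a function returning __m64 leaves its result in %mm0 (a sketch; the function name is illustrative):

    #include <mmintrin.h>

    /* Per the CCIfType<[x86mmx], CCAssignToReg<[MM0]>> rule, the __m64
     * return value ends up in %mm0. */
    __m64 add_pi16(__m64 a, __m64 b)
    {
        return _mm_add_pi16(a, b);   /* paddw; result returned in MM0 */
    }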
|
D | X86RegisterInfo.td | 152 def MM0 : X86Reg<"mm0", 0>, DwarfRegNum<[41, 29, 29]>;
|
D | X86InstrCompiler.td |
    456  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
    476  MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
|
/external/llvm/lib/Target/X86/Disassembler/ |
D | X86DisassemblerDecoder.h | 207 ENTRY(MM0) \
|
/external/swiftshader/third_party/LLVM/lib/Target/X86/MCTargetDesc/ |
D | X86MCTargetDesc.cpp | 176 case X86::YMM0: case X86::YMM8: case X86::MM0: in getX86RegNum()
|
/external/llvm/docs/TableGen/ |
D | index.rst | 65 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, R10, R10B, R10D, R10W, R11, R11B, R11D,
|
D | LangIntro.rst | 543 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
|
/external/llvm/lib/Target/X86/InstPrinter/ |
D | X86InstComments.cpp | 168 if (X86::MM0 <= RegNo && RegNo <= X86::MM7) in getVectorRegSize()
|