/external/mesa3d/src/mesa/x86/ |
D | sse_normal.S |
      76  MOVSS ( M(0), XMM1 ) /* m0 */
      78  UNPCKLPS( XMM2, XMM1 ) /* m5 | m0 */
      81  MULPS ( XMM0, XMM1 ) /* m5*scale | m0*scale */
      87  MULPS ( XMM1, XMM2 ) /* uy*m5*scale | ux*m0*scale */
     139  MOVSS ( M(4), XMM1 ) /* m4 */
     140  UNPCKLPS( XMM1, XMM0 ) /* m4 | m0 */
     146  MOVSS ( M(1), XMM1 ) /* m1 */
     148  UNPCKLPS( XMM2, XMM1 ) /* m5 | m1 */
     149  MULPS ( XMM4, XMM1 ) /* m5*scale | m1*scale */
     167  MULPS ( XMM1, XMM4 ) /* uy*m5 | uy*m1 */
     [all …]
|
D | sse_xform2.S |
      77  MOVAPS( M(4), XMM1 ) /* m7 | m6 | m5 | m4 */
      87  MULPS( XMM1, XMM4 ) /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
     192  MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
     194  UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
     201  MULPS ( XMM1, XMM0 ) /* - | - | oy*m5 | ox*m0 */
     251  MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
     253  UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
     260  MULPS( XMM1, XMM4 ) /* oy*m5 | ox*m0 */
     311  MOVLPS( M(4), XMM1 ) /* m5 | m4 */
     322  MULPS( XMM1, XMM4 ) /* oy*m5 | oy*m4 */
     [all …]
|
D | sse_xform3.S |
      78  MOVAPS ( REGOFF(16, EDX), XMM1 ) /* m4 | m5 | m6 | m7 */
      93  MULPS ( XMM1, XMM5 ) /* m7*oy | m6*oy | m5*oy | m4*oy */
     204  MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
     206  UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
     215  MULPS ( XMM1, XMM0 ) /* - | - | s1*m5 | s0*m0 */
     269  MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
     271  UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
     280  MULPS ( XMM1, XMM0 ) /* oy*m5 | ox*m0 */
     343  MOVLPS( M(4), XMM1 ) /* m5 | m4 */
     353  MULPS ( XMM1, XMM4 ) /* oy*m5 | oy*m4 */
     [all …]
|
D | sse_xform1.S |
      78  MOVAPS( M(12), XMM1 ) /* m15 | m14 | m13 | m12 */
      85  ADDPS( XMM1, XMM2 ) /* + | + | + | + */
     187  MOVSS( M(12), XMM1 ) /* m12 */
     195  ADDSS( XMM1, XMM4 ) /* ox*m0+m12 */
     248  MOVSS( M(0), XMM1 ) /* m0 */
     254  MULSS( XMM1, XMM3 ) /* ox*m0 */
     306  MOVLPS( M(12), XMM1 ) /* m13 | m12 */
     313  ADDPS( XMM1, XMM2 ) /* - | - | ox*m1+m13 | ox*m0+m12 */
     361  MOVSS( M(12), XMM1 ) /* m12 */
     368  ADDSS( XMM1, XMM3 ) /* ox*m0+m12 */
     [all …]
|
D | sse_xform4.S |
      82  MOVSS( SRC(1), XMM1 ) /* oy */
      83  SHUFPS( CONST(0x0), XMM1, XMM1 ) /* oy | oy | oy | oy */
      84  MULPS( XMM5, XMM1 ) /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
      94  ADDPS( XMM1, XMM0 ) /* ox*m3+oy*m7 | ... */
     144  MOVAPS( MAT(4), XMM1 ) /* m7 | m6 | m5 | m4 */
     156  MULPS( XMM1, XMM5 ) /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
|
D | common_x86_asm.S |
     204  MOVUPS ( REGIND( ESP ), XMM1 )
     206  DIVPS ( XMM0, XMM1 )
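
The Mesa transform files listed above all use the same SSE idiom: load a matrix column (MOVAPS/MOVLPS/MOVSS plus UNPCKLPS), broadcast one source component across a register (SHUFPS 0x00), multiply it against the column (MULPS), and accumulate the partial products plus the translation column (ADDPS/ADDSS). Below is a minimal sketch of that idiom written with SSE intrinsics; it is not Mesa's code, and the function name and column-major matrix layout are assumptions taken from the m12/m13 comments in the excerpts.

    #include <stdio.h>
    #include <xmmintrin.h>

    /* Hedged sketch of the broadcast-multiply-accumulate idiom from the
     * sse_xform*.S excerpts.  Assumes a column-major 4x4 matrix, so
     * m[12..14] holds the translation. */
    static void xform_point2(float out[4], const float m[16], float ox, float oy)
    {
        __m128 col0 = _mm_loadu_ps(m + 0);   /* m3  | m2  | m1  | m0  */
        __m128 col1 = _mm_loadu_ps(m + 4);   /* m7  | m6  | m5  | m4  */
        __m128 col3 = _mm_loadu_ps(m + 12);  /* m15 | m14 | m13 | m12 */

        __m128 vx = _mm_set1_ps(ox);         /* ox | ox | ox | ox  (SHUFPS 0x00) */
        __m128 vy = _mm_set1_ps(oy);         /* oy | oy | oy | oy */

        /* ox*m0.. + oy*m4.. + m12.. : the MULPS/ADDPS chain from the listings */
        __m128 r = _mm_add_ps(_mm_add_ps(_mm_mul_ps(vx, col0),
                                         _mm_mul_ps(vy, col1)),
                              col3);
        _mm_storeu_ps(out, r);
    }

    int main(void)
    {
        /* Identity rotation with a (5, 6) translation. */
        const float m[16] = { 1,0,0,0,  0,1,0,0,  0,0,1,0,  5,6,0,1 };
        float out[4];
        xform_point2(out, m, 2.0f, 3.0f);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 7 9 0 1 */
        return 0;
    }
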
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | pr31143.ll |
       5  ; CHECK: xorps %[[XMM1:xmm[0-9]+]], %[[XMM1]]
       6  ; CHECK: roundss $9, %[[XMM0]], %[[XMM1]]
      32  ; CHECK: xorps %[[XMM1:xmm[0-9]+]], %[[XMM1]]
      33  ; CHECK: roundsd $9, %[[XMM0]], %[[XMM1]]
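
The pr31143.ll checks above expect the scalar round to be preceded by an XORPS that zeroes its destination; ROUNDSS/ROUNDSD only write the low element, so zeroing first presumably breaks the dependency on whatever was previously in that register. Immediate 9 selects round-toward-negative-infinity, i.e. floor. As a hedged illustration (this is not the test's actual IR), a plain C floor call built with SSE4.1 enabled is the kind of source that lowers to this pattern:

    #include <math.h>

    /* With SSE4.1 available, these typically compile to roundss/roundsd with
     * immediate 9 (round toward -inf).  The leading xorps in the checks above
     * is inserted by the compiler, not requested by the C source. */
    float  floor_f32(float x)  { return floorf(x); }
    double floor_f64(double x) { return floor(x); }
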
|
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86CallingConv.td |
      37  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
      41  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
      43  // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3
      65  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
      76  CCIfType<[f32], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      77  CCIfType<[f64], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      91  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
      92  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
      95  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
     159  CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
     [all …]
|
D | X86GenCallingConv.inc |
     176  X86::XMM0, X86::XMM1, X86::XMM2
     227  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
     329  X86::XMM0, X86::XMM1, X86::XMM2
     548  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
     653  X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6
     708  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
     723  X86::XMM1, X86::XMM2, X86::XMM3
     738  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
     755  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
     854  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
     [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
      53  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
      57  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
      59  // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3
      87  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
      98  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      99  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
     112  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
     114  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
     140  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
     142  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
      47  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
      53  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
     233  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
     237  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
     239  // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3
     268  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
     279  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
     280  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
     293  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
     295  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
     [all …]
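
The CCAssignToReg lists excerpted from the three X86CallingConv.td copies above are the tables the calling-convention lowering walks: the first value of a given class lands in XMM0, the next in XMM1, and so on. As a hedged C illustration of one such row (the f32/f64 return registers on x86-64), the two double fields of the struct below come back in XMM0 and XMM1 under the SysV ABI; the type and function names are invented for the example.

    /* Sketch only: relies on the SysV x86-64 return convention reflected in
     * the CCAssignToReg<[XMM0, XMM1]> rows above; other ABIs (e.g. Win64)
     * return such a struct differently. */
    typedef struct { double x, y; } vec2d;

    vec2d make_vec2d(double x, double y)
    {
        vec2d v = { x, y };   /* v.x is returned in XMM0, v.y in XMM1 */
        return v;
    }
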
|
/external/swiftshader/third_party/LLVM/test/MC/Disassembler/X86/ |
D | enhanced.txt |
       7  …:xorps][w: ][2-r:%xmm1=r129][p:,][w: ][0-r:%xmm2=r130] 0:[XMM2/130]=0 1:[XMM2/130]=0 2:[XMM1/129]=0
       9  …:andps][w: ][2-r:%xmm1=r129][p:,][w: ][0-r:%xmm2=r130] 0:[XMM2/130]=0 1:[XMM2/130]=0 2:[XMM1/129]=0
|
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/tools/llvm-exegesis/X86/ |
D | TargetTest.cpp |
      56  const auto Insts = ExegesisTarget_->setRegToConstant(*STI, llvm::X86::XMM1);  in TEST_F()
      70  const auto Insts = ExegesisTarget_->setRegToConstant(*STI, llvm::X86::XMM1);  in TEST_F()
      84  const auto Insts = ExegesisTarget_->setRegToConstant(*STI, llvm::X86::XMM1);  in TEST_F()
|
/external/swiftshader/third_party/LLVM/test/TableGen/ |
D | cast.td |
      42  def XMM1: Register<"xmm1">;
      59  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | Slice.td |
      42  def XMM1: Register<"xmm1">;
      59  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | TargetInstrSpec.td |
      43  def XMM1: Register<"xmm1">;
      60  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/TableGen/ |
D | TargetInstrSpec.td |
      48  def XMM1: Register<"xmm1">;
      65  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | Slice.td |
      41  def XMM1: Register<"xmm1">;
      58  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | cast.td |
      47  def XMM1: Register<"xmm1">;
      64  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
/external/llvm/test/TableGen/ |
D | cast.td |
      47  def XMM1: Register<"xmm1">;
      64  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | Slice.td |
      41  def XMM1: Register<"xmm1">;
      58  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | TargetInstrSpec.td |
      48  def XMM1: Register<"xmm1">;
      65  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/X86/ |
D | X86GenCallingConv.inc |
     235  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
     431  X86::XMM0, X86::XMM1, X86::XMM2
     596  X86::XMM0, X86::XMM1, X86::XMM2
     885  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
     909  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    1176  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
    1240  X86::XMM0, X86::XMM1, X86::XMM2
    1553  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    1689  X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6
    1965  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
     [all …]
|
/external/llvm/test/MC/X86/ |
D | intel-syntax-encoding.s | 65 cmpltps XMM2, XMM1
|
/external/capstone/suite/MC/X86/ |
D | intel-syntax-encoding.s.cs | 26 0x0f,0xc2,0xd1,0x01 = cmpltps XMM2, XMM1
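The LLVM MC test and the Capstone regression file above pin the same encoding from opposite directions: 0F C2 /r ib is CMPPS, and immediate predicate 1 means "less than", which both tools surface as the cmpltps alias, so the assembler test maps the mnemonic to those bytes and the .cs file expects the bytes to disassemble back to it. Below is a small stand-alone driver using the Capstone C API that reproduces the disassembly side; the driver itself is not part of either test suite.

    #include <stdio.h>
    #include <stdint.h>
    #include <capstone/capstone.h>

    int main(void)
    {
        /* Byte sequence from the tests above: CMPPS xmm2, xmm1 with predicate 1. */
        const uint8_t code[] = { 0x0f, 0xc2, 0xd1, 0x01 };
        csh handle;
        cs_insn *insn;

        if (cs_open(CS_ARCH_X86, CS_MODE_32, &handle) != CS_ERR_OK)
            return 1;

        size_t count = cs_disasm(handle, code, sizeof(code), 0, 0, &insn);
        for (size_t i = 0; i < count; i++)
            printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);  /* cmpltps xmm2, xmm1 */

        cs_free(insn, count);
        cs_close(&handle);
        return 0;
    }
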
|