/external/mesa3d/src/mesa/x86/ |
D | sse_xform3.S |
      77  MOVAPS ( REGOFF(0, EDX), XMM0 ) /* m0 | m1 | m2 | m3 */
      92  MULPS ( XMM0, XMM4 ) /* m3*ox | m2*ox | m1*ox | m0*ox */
     150  MOVLPS ( S(0), XMM0 )
     151  MOVLPS ( XMM0, D(0) )
     152  MOVSS ( S(2), XMM0 )
     153  MOVSS ( XMM0, D(2) )
     201  XORPS( XMM0, XMM0 ) /* clean the working register */
     214  MOVLPS ( S(0), XMM0 ) /* - | - | s1 | s0 */
     215  MULPS ( XMM1, XMM0 ) /* - | - | s1*m5 | s0*m0 */
     216  ADDPS ( XMM2, XMM0 ) /* - | - | +m13 | +m12 */
     [all …]
|
D | sse_normal.S |
      79  MOVSS ( ARG_SCALE, XMM0 ) /* scale */
      80  SHUFPS ( CONST(0x0), XMM0, XMM0 ) /* scale | scale */
      81  MULPS ( XMM0, XMM1 ) /* m5*scale | m0*scale */
      82  MULSS ( M(10), XMM0 ) /* m10*scale */
      91  MULSS ( XMM0, XMM2 ) /* uz*m10*scale */
     138  MOVSS ( M(0), XMM0 ) /* m0 */
     140  UNPCKLPS( XMM1, XMM0 ) /* m4 | m0 */
     145  MULPS ( XMM4, XMM0 ) /* m4*scale | m0*scale */
     164  MULPS ( XMM0, XMM3 ) /* ux*m4 | ux*m0 */
     231  MOVSS( M(0), XMM0 ) /* m0 */
     [all …]
|
D | sse_xform4.S |
      78  MOVSS( SRC(0), XMM0 ) /* ox */
      79  SHUFPS( CONST(0x0), XMM0, XMM0 ) /* ox | ox | ox | ox */
      80  MULPS( XMM4, XMM0 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
      94  ADDPS( XMM1, XMM0 ) /* ox*m3+oy*m7 | ... */
      95  ADDPS( XMM2, XMM0 ) /* ox*m3+oy*m7+oz*m11 | ... */
      96  ADDPS( XMM3, XMM0 ) /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
      97  MOVAPS( XMM0, DST(0) ) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
     143  MOVAPS( MAT(0), XMM0 ) /* m3 | m2 | m1 | m0 */
     152  MULPS( XMM0, XMM4 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
     217  MOVAPS( REGIND(ESI), XMM0 )
     [all …]
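The sse_xform*.S hits above are Mesa's SSE vertex-transform loops: each source component is broadcast across a register with SHUFPS, multiplied against one column of the column-major 4x4 matrix, and the partial products are summed with ADDPS. A minimal C sketch of that pattern with intrinsics follows; it is an illustration of the idiom, not the Mesa code, and the function name and column-major layout are assumptions drawn from the comments above.

#include <xmmintrin.h>

/* Transform one 4-component point by a column-major 4x4 matrix,
 * mirroring the broadcast / MULPS / ADDPS sequence shown above. */
void transform_point4(float dst[4], const float m[16], const float src[4])
{
    __m128 col0 = _mm_loadu_ps(m + 0);   /* m3  | m2  | m1  | m0  */
    __m128 col1 = _mm_loadu_ps(m + 4);   /* m7  | m6  | m5  | m4  */
    __m128 col2 = _mm_loadu_ps(m + 8);   /* m11 | m10 | m9  | m8  */
    __m128 col3 = _mm_loadu_ps(m + 12);  /* m15 | m14 | m13 | m12 */

    __m128 ox = _mm_set1_ps(src[0]);     /* ox | ox | ox | ox */
    __m128 oy = _mm_set1_ps(src[1]);
    __m128 oz = _mm_set1_ps(src[2]);
    __m128 ow = _mm_set1_ps(src[3]);

    __m128 r = _mm_mul_ps(ox, col0);                 /* ox*m3 | ... | ox*m0 */
    r = _mm_add_ps(r, _mm_mul_ps(oy, col1));         /* + oy*m7  ...        */
    r = _mm_add_ps(r, _mm_mul_ps(oz, col2));         /* + oz*m11 ...        */
    r = _mm_add_ps(r, _mm_mul_ps(ow, col3));         /* + ow*m15 ...        */

    _mm_storeu_ps(dst, r);               /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
}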
|
D | sse_xform2.S |
      76  MOVAPS( M(0), XMM0 ) /* m3 | m2 | m1 | m0 */
      84  MULPS( XMM0, XMM3 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
     189  XORPS( XMM0, XMM0 ) /* clean the working register */
     200  MOVLPS ( S(0), XMM0 ) /* - | - | oy | ox */
     201  MULPS ( XMM1, XMM0 ) /* - | - | oy*m5 | ox*m0 */
     202  ADDPS ( XMM2, XMM0 ) /* - | - | +m13 | +m12 */
     203  MOVLPS ( XMM0, D(0) ) /* -> D(1) | -> D(0) */
     255  XORPS ( XMM0, XMM0 ) /* 0 | 0 | 0 | 0 */
     263  MOVSS( XMM0, D(3) ) /* ->D(3) */
     310  MOVLPS( M(0), XMM0 ) /* m1 | m0 */
     [all …]
|
D | sse_xform1.S |
      77  MOVAPS( M(0), XMM0 ) /* m3 | m2 | m1 | m0 */
      84  MULPS( XMM0, XMM2 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
     186  MOVSS( M(0), XMM0 ) /* m0 */
     194  MULSS( XMM0, XMM4 ) /* ox*m0 */
     247  XORPS( XMM0, XMM0 ) /* 0 | 0 | 0 | 0 */
     258  MOVSS( XMM0, D(1) )
     259  MOVSS( XMM0, D(3) )
     305  MOVLPS( M(0), XMM0 ) /* m1 | m0 */
     312  MULPS( XMM0, XMM2 ) /* - | - | ox*m1 | ox*m0 */
     360  MOVSS( M(0), XMM0 ) /* m0 */
     [all …]
|
D | common_x86_asm.S | 168 XORPS ( XMM0, XMM0 ) 197 XORPS ( XMM0, XMM0 ) 206 DIVPS ( XMM0, XMM1 )
|
/external/llvm/test/CodeGen/X86/ |
D | break-false-dep.ll |
     188  ;SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
     189  ;SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
     190  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
     191  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
     192  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
     193  ;SSE-NEXT: movsd [[XMM0]],
     195  ;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
     196  ;AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], [[XMM0]]
     197  ;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
     198  ;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
     [all …]
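The checks above insist on an xorps/vxorps immediately before the cvtsi2sdl: the conversion writes only the low lane of its destination, so without the zeroing it inherits a false dependence on whatever last wrote that register. A hand-written illustration of the idiom follows; it is not taken from the test, the function is hypothetical, and it assumes an x86-64 compiler with GCC-style inline asm.

#include <stdio.h>

/* int -> double conversion with an explicit dependency-breaking xorps,
 * the same pattern the FileCheck lines above expect from the compiler. */
static double int_to_double(int x)
{
    double d;
    __asm__ ("xorps    %%xmm7, %%xmm7\n\t"  /* zero: breaks the false dependence */
             "cvtsi2sd %1, %%xmm7\n\t"      /* partial write: only the low lane  */
             "movsd    %%xmm7, %0"
             : "=m"(d)
             : "r"(x)
             : "xmm7");
    return d;
}

int main(void)
{
    printf("%f\n", int_to_double(42));
    return 0;
}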
|
D | vector-sext.ll | 14 ; SSE2-NEXT: # kill: XMM0<def> XMM1<kill> 26 ; SSSE3-NEXT: # kill: XMM0<def> XMM1<kill>
|
D | vector-shuffle-256-v4.ll | 846 ; ALL-NEXT: # kill: XMM0<def> XMM0<kill> YMM0<def>
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
      42  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
      46  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
      73  // case they use XMM0, otherwise it is the same as the common X86 calling
      76  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
      83  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
      87  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      88  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
     101  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
     103  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
     129  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
     [all …]
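These fragments assign small floating-point and vector return values to XMM0–XMM3 (XMM0–XMM2 for the X86-32 fastcc FP case). As a rough source-level illustration, assuming a target that uses these rules, a function returning a __m128 hands its result back in XMM0:

#include <xmmintrin.h>

/* Illustration only: a vector return value that fits in one register
 * is expected back in XMM0 under the conventions quoted above. */
__m128 splat4(float x)
{
    return _mm_set1_ps(x);
}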
|
D | README-SSE.txt |
     184  %XMM0 = MOVAPSrm %ECX, 1, %NOREG, 0
     185  %XMM1 = MOVAPSrr %XMM0
     187  %XMM2 = MOVAPSrr %XMM0
     189  %XMM3 = MOVAPSrr %XMM0
     191  SHUFPSrr %XMM0<def&use>, %XMM0, 85
     807  This would be better kept in the SSE unit by treating XMM0 as a 4xfloat and
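For reference, SHUFPSrr with immediate 85 (0x55) and identical source and destination replicates element 1 across all four lanes, which is the kind of operation the note at line 807 wants to keep inside the SSE unit. A tiny intrinsics equivalent (illustrative only, not from the README):

#include <xmmintrin.h>

/* shufps $0x55, %xmm, %xmm : broadcast lane 1 to every lane. */
__m128 broadcast_lane1(__m128 v)
{
    return _mm_shuffle_ps(v, v, 0x55);   /* v[1] | v[1] | v[1] | v[1] */
}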
|
D | X86RegisterInfo.cpp | 414 for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI) in getReservedRegs() 758 if (Reg >= X86::XMM0 && Reg <= X86::XMM31) in get512BitSuperRegister() 759 return X86::ZMM0 + (Reg - X86::XMM0); in get512BitSuperRegister()
|
D | X86RegisterInfo.td | 172 def XMM0: X86Reg<"xmm0", 0>, DwarfRegNum<[17, 21, 21]>;
|
D | X86InstrSSE.td |
    7233  let Uses = [XMM0], Constraints = "$src1 = $dst" in {
    7241  [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
    7250  (bitconvert (mem_frag addr:$src2)), XMM0))],
    7282  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
    7285  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
    7288  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
    7291  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
    7294  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
    7385  let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
    7422  let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
    [all …]
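The vselect patterns above pin the mask operand to XMM0; they correspond to the SSE4.1 blendv instructions, whose legacy (non-VEX) encodings read the mask implicitly from XMM0. A hedged intrinsics illustration of that selection (requires SSE4.1; not the TableGen definitions themselves):

#include <smmintrin.h>

/* Per-lane select: lanes whose mask sign bit is set come from b,
 * the rest from a.  The non-VEX blendvps takes the mask in XMM0. */
__m128 select_ps(__m128 a, __m128 b, __m128 mask)
{
    return _mm_blendv_ps(a, b, mask);
}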
|
/external/llvm/test/TableGen/ |
D | cast.td | 46 def XMM0: Register<"xmm0">; 64 [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | Slice.td | 40 def XMM0: Register<"xmm0">; 58 [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | TargetInstrSpec.td | 47 def XMM0: Register<"xmm0">; 65 [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
D | MultiPat.td | 50 def XMM0: Register<"xmm0">; 68 [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
/external/llvm/lib/Target/X86/AsmParser/ |
D | X86Operand.h | 236 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; in isMemVX32() 244 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; in isMemVX64()
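isMemVX32/isMemVX64 accept memory operands whose index register is XMM0–XMM15, i.e. the VSIB form used by vector gathers. An illustrative producer of that operand shape, assuming AVX2 and standard intrinsics (illustration only, not code from the assembler):

#include <immintrin.h>

/* Gather four floats: the generated instruction indexes memory with an
 * XMM register, the operand form the checks above validate. */
__m128 gather4(const float *base, __m128i idx)
{
    return _mm_i32gather_ps(base, idx, 4);   /* scale = sizeof(float) */
}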
|
/external/llvm/lib/Target/X86/Disassembler/ |
D | X86DisassemblerDecoder.h | 217 ENTRY(XMM0) \
|
D | X86Disassembler.cpp | 542 mcInst.addOperand(MCOperand::CreateReg(X86::XMM0 + (immediate >> 4))); in translateImmediate()
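Here the disassembler recovers the extra register of an /is4-encoded instruction (vblendvps and friends) from the top four bits of the 8-bit immediate. A stand-alone sketch of that step, with XMM_BASE standing in for X86::XMM0 (hypothetical names, not the LLVM code):

#include <stdint.h>

enum { XMM_BASE = 0 };                 /* stand-in for X86::XMM0 */

/* The upper nibble of the /is4 immediate selects XMM0..XMM15. */
static unsigned decode_is4_register(uint8_t imm)
{
    return XMM_BASE + (imm >> 4);
}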
|
/external/llvm/test/MC/X86/ |
D | intel-syntax.s | 73 vshufpd XMM0, XMM1, XMM2, 1
|
/external/valgrind/memcheck/ |
D | mc_machine.c | 714 if (o >= GOF(XMM0) && o+sz <= GOF(XMM0)+SZB(XMM0)) return GOF(XMM0); in get_otrack_shadow_offset_wrk()
|
/external/llvm/docs/TableGen/ |
D | index.rst | 69 XMM0, XMM1, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5,
|
D | LangIntro.rst | 544 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS] in {
|