Searched refs:VMOVDRR (Results 1 – 8 of 8) sorted by relevance
/external/llvm/lib/Target/ARM/
ARMISelLowering.h
    78:   VMOVDRR,   // Two gprs to double.   (enumerator)
ARMInstrVFP.td
    21:   def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
    822:  // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
    899:  def VMOVDRR : AVConv5I<0b11000100, 0b1011,
    1857: (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
ARMISelLowering.cpp
    1045: case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";    // in getTargetNodeName()
    1370: Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);    // in LowerCallResult()
    1387: Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);    // in LowerCallResult()
    2828: return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);    // in GetF64FormalArgument()
    3449: return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);    // in getCMOV()
    3876: Tmp0.getOpcode() == ARMISD::VMOVDRR;    // in LowerFCOPYSIGN()
    3947: return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);    // in LowerFCOPYSIGN()
    4029: DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));    // in ExpandBITCAST()
    8472: if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP())    // in PerformVMOVRRDCombine()
    9182: if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&    // in PerformSTORECombine()
    [all …]
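The recurring pattern in the ARMISelLowering.cpp hits above is assembling an f64 value from two i32 halves by emitting an ARMISD::VMOVDRR node. A minimal sketch of that pattern, assuming LLVM's SelectionDAG API; the helper name buildF64FromGPRPair and the file scaffolding are hypothetical, and only the DAG.getNode call mirrors the hits:

    #include "ARMISelLowering.h"            // ARMISD::VMOVDRR (target-local header)
    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // Hypothetical helper: rebuild an f64 that was split across a GPR pair
    // (e.g. a call result or formal argument passed in two i32 registers).
    static SDValue buildF64FromGPRPair(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue Lo, SDValue Hi) {
      // VMOVDRR moves two GPRs into one double-precision D register.
      // The in-tree lowering additionally swaps the halves on big-endian targets.
      return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
    }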
ARMFastISel.cpp
    2048: TII.get(ARM::VMOVDRR), ResultReg)    // in FinishCall()
ARMScheduleSwift.td
    1659: def : InstRW<[SwiftWriteP2FourCycle], (instregex "VMOVDRR$")>;
ARMInstrNEON.td
    5750: // NEONvdup patterns for uarchs with slow VDUP.32 - use VMOVDRR instead.
    5751: def : Pat<(v2i32 (NEONvdup (i32 GPR:$R))), (VMOVDRR GPR:$R, GPR:$R)>,
    5753: def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VMOVDRR GPR:$R, GPR:$R)>,
    6360: // Prefer VMOVDRR for i32 -> f32 bitcasts, it can write all DPR registers.
    6362: (EXTRACT_SUBREG (VMOVDRR GPR:$a, GPR:$a), ssub_0)>,
ARMBaseInstrInfo.cpp
    4540: case ARM::VMOVDRR:    // in getRegSequenceLikeInputs()
ARMInstrInfo.td
    304: // Cortex-A9 prefers VMOVSR to VMOVDRR even when using NEON for scalar FP, as