/external/llvm/lib/Target/AArch64/
D | AArch64FastISel.cpp
    324  unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);  in fastMaterializeAlloca() [local]
    326  ResultReg)  in fastMaterializeAlloca()
    330  return ResultReg;  in fastMaterializeAlloca()
    347  unsigned ResultReg = createResultReg(RC);  in materializeInt() [local]
    349  ResultReg).addReg(ZeroReg, getKillRegState(true));  in materializeInt()
    350  return ResultReg;  in materializeInt()
    384  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));  in materializeFP() [local]
    386  TII.get(TargetOpcode::COPY), ResultReg)  in materializeFP()
    389  return ResultReg;  in materializeFP()
    404  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));  in materializeFP() [local]
    [all …]
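The fastMaterializeAlloca() fragments above follow the usual FastISel materialization pattern: allocate a result vreg with createResultReg(), emit one instruction into it with BuildMI(), and return the register. A minimal sketch of how those pieces likely fit together is below; the ADDXri opcode, the StaticAllocaMap lookup, and the frame-index/immediate operands are assumptions, only the createResultReg/BuildMI/return lines come from the listing.

    // Sketch only, not the verbatim source.
    unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
      auto SI = FuncInfo.StaticAllocaMap.find(AI);   // assumed lookup
      if (SI == FuncInfo.StaticAllocaMap.end())
        return 0;                                    // not a static alloca
      unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::ADDXri), ResultReg)   // ResultReg = frame address
          .addFrameIndex(SI->second)
          .addImm(0)
          .addImm(0);
      return ResultReg;
    }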
D | AArch64InstrInfo.cpp
    2623  unsigned ResultReg = Root.getOperand(0).getReg();  in genMadd() [local]
    2631  if (TargetRegisterInfo::isVirtualRegister(ResultReg))  in genMadd()
    2632  MRI.constrainRegClass(ResultReg, RC);  in genMadd()
    2641  ResultReg)  in genMadd()
    2673  unsigned ResultReg = Root.getOperand(0).getReg();  in genMaddR() [local]
    2679  if (TargetRegisterInfo::isVirtualRegister(ResultReg))  in genMaddR()
    2680  MRI.constrainRegClass(ResultReg, RC);  in genMaddR()
    2689  ResultReg)  in genMaddR()
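Lines 2623-2641 and 2673-2689 show the machine-combiner helpers reusing the root instruction's def as ResultReg and constraining its register class before emitting the fused multiply-add. A hedged sketch of that step; MaddOpc and the SrcReg0/SrcReg1/SrcReg2 operand names are hypothetical, the ResultReg handling mirrors the listing.

    unsigned ResultReg = Root.getOperand(0).getReg();  // reuse the root's destination
    if (TargetRegisterInfo::isVirtualRegister(ResultReg))
      MRI.constrainRegClass(ResultReg, RC);            // keep it legal for the MADD form
    MachineInstrBuilder MIB =
        BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
            .addReg(SrcReg0)                           // multiplicand (hypothetical name)
            .addReg(SrcReg1)                           // multiplier   (hypothetical name)
            .addReg(SrcReg2);                          // accumulator  (hypothetical name)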
/external/llvm/lib/CodeGen/SelectionDAG/
D | FastISel.cpp
    411  unsigned ResultReg =  in selectBinaryOp() [local]
    414  if (!ResultReg)  in selectBinaryOp()
    418  updateValueMap(I, ResultReg);  in selectBinaryOp()
    445  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,  in selectBinaryOp() [local]
    447  if (!ResultReg)  in selectBinaryOp()
    451  updateValueMap(I, ResultReg);  in selectBinaryOp()
    457  unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),  in selectBinaryOp() [local]
    459  if (ResultReg) {  in selectBinaryOp()
    461  updateValueMap(I, ResultReg);  in selectBinaryOp()
    472  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),  in selectBinaryOp() [local]
    [all …]
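selectBinaryOp() in the target-independent FastISel tries progressively more general emitters (fastEmit_ri_ for an immediate operand, fastEmit_rf for a floating-point constant, finally fastEmit_rr); each attempt either produces a vreg or returns 0. A minimal sketch of the final register-register attempt, with the operand setup omitted:

    unsigned ResultReg =
        fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(), ISDOpcode,
                    Op0, Op0IsKill, Op1, Op1IsKill);
    if (!ResultReg)
      return false;                 // fall back to SelectionDAG for this instruction
    updateValueMap(I, ResultReg);   // remember which vreg now holds I's value
    return true;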
/external/llvm/lib/Target/Mips/
D | MipsFastISel.cpp
    120  bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
    265  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);  in emitLogicalOp() [local]
    266  if (!ResultReg)  in emitLogicalOp()
    269  emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);  in emitLogicalOp()
    270  return ResultReg;  in emitLogicalOp()
    284  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);  in fastMaterializeAlloca() [local]
    286  ResultReg)  in fastMaterializeAlloca()
    289  return ResultReg;  in fastMaterializeAlloca()
    305  unsigned ResultReg = createResultReg(RC);  in materialize32BitInt() [local]
    309  emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);  in materialize32BitInt()
    [all …]
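emitLogicalOp() (lines 265-270) is the simplest form of the pattern: one fresh GPR32 result and a single two-register ALU instruction. A sketch, assuming Opc, LHSReg, and RHSReg were computed earlier in the function:

    unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
    if (!ResultReg)
      return 0;
    emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);  // e.g. AND/OR/XOR rr form
    return ResultReg;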
/external/llvm/lib/Target/X86/
D | X86FastISel.cpp
    88   unsigned &ResultReg, unsigned Alignment = 1);
    97   unsigned &ResultReg);
    348  MachineMemOperand *MMO, unsigned &ResultReg,  in X86FastEmitLoad() [argument]
    420  ResultReg = createResultReg(RC);  in X86FastEmitLoad()
    422  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);  in X86FastEmitLoad()
    571  unsigned &ResultReg) {  in X86FastEmitExtend() [argument]
    577  ResultReg = RR;  in X86FastEmitExtend()
    1149 unsigned ResultReg = 0;  in X86SelectLoad() [local]
    1150 if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,  in X86SelectLoad()
    1154 updateValueMap(I, ResultReg);  in X86SelectLoad()
    [all …]
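X86SelectLoad() (lines 1149-1154) shows the out-parameter variant: ResultReg starts at 0 and X86FastEmitLoad() fills it in once it has chosen an opcode and register class. A sketch of the caller side; the trailing alignment argument is an assumption, the rest mirrors the listing.

    unsigned ResultReg = 0;
    if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
                         LI->getAlignment()))      // alignment argument assumed
      return false;
    updateValueMap(I, ResultReg);                  // the loaded value lives in ResultReg
    return true;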
/external/llvm/lib/Target/ARM/
D | ARMFastISel.cpp
    171  bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
    287  unsigned ResultReg = createResultReg(RC);  in fastEmitInst_r() [local]
    295  ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));  in fastEmitInst_r()
    300  TII.get(TargetOpcode::COPY), ResultReg)  in fastEmitInst_r()
    303  return ResultReg;  in fastEmitInst_r()
    310  unsigned ResultReg = createResultReg(RC);  in fastEmitInst_rr() [local]
    320  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)  in fastEmitInst_rr()
    328  TII.get(TargetOpcode::COPY), ResultReg)  in fastEmitInst_rr()
    331  return ResultReg;  in fastEmitInst_rr()
    339  unsigned ResultReg = createResultReg(RC);  in fastEmitInst_rrr() [local]
    [all …]
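fastEmitInst_r() (lines 287-303) either defines ResultReg directly or, when the instruction cannot define ResultReg's class itself, emits into the instruction's own def and copies into ResultReg afterwards. A simplified sketch; the getNumDefs() check, the ImplicitDefs copy source, and the AddOptionalDefs() wrapper (suggested by the double closing parenthesis at line 295) are assumptions.

    unsigned ResultReg = createResultReg(RC);
    const MCInstrDesc &II = TII.get(MachineInstOpcode);
    if (II.getNumDefs() >= 1) {
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                              ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
    } else {
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                          .addReg(Op0, Op0IsKill * RegState::Kill));
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(II.ImplicitDefs[0]);   // copy out of the instruction's fixed def
    }
    return ResultReg;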
/external/llvm/lib/Target/PowerPC/
D | PPCFastISel.cpp
    156  bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
    429  unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);  in PPCSimplifyAddress() [local]
    431  ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);  in PPCSimplifyAddress()
    432  Addr.Base.Reg = ResultReg;  in PPCSimplifyAddress()
    449  bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,  in PPCEmitLoad() [argument]
    463  (ResultReg ? MRI.getRegClass(ResultReg) :  in PPCEmitLoad()
    511  bool IsVSSRC = (ResultReg != 0) && isVSSRCRegister(ResultReg);  in PPCEmitLoad()
    512  bool IsVSFRC = (ResultReg != 0) && isVSFRCRegister(ResultReg);  in PPCEmitLoad()
    521  if (ResultReg == 0)  in PPCEmitLoad()
    522  ResultReg = createResultReg(UseRC);  in PPCEmitLoad()
    [all …]
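PPCEmitLoad() (lines 449-522) takes ResultReg by reference: a nonzero incoming register pins the load to the caller's class (including the VSX single/double classes checked at lines 511-512), while zero means the routine creates the register itself. A sketch of that decision; the register-class selection helper name is a stand-in, the other lines mirror the listing.

    const TargetRegisterClass *UseRC =
        (ResultReg ? MRI.getRegClass(ResultReg)
                   : regClassForLoad(VT));          // regClassForLoad() is hypothetical
    bool IsVSSRC = (ResultReg != 0) && isVSSRCRegister(ResultReg);  // VSX single
    bool IsVSFRC = (ResultReg != 0) && isVSFRCRegister(ResultReg);  // VSX double
    if (ResultReg == 0)
      ResultReg = createResultReg(UseRC);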
/external/llvm/lib/Target/AMDGPU/
D | SIInstrInfo.cpp
    2633  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);  in lowerScalarAbs() [local]
    2639  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)  in lowerScalarAbs()
    2643  MRI.replaceRegWith(Dest.getReg(), ResultReg);  in lowerScalarAbs()
    2644  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);  in lowerScalarAbs()
    2786  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);  in splitScalar64BitBCNT() [local]
    2799  BuildMI(MBB, MII, DL, InstDesc, ResultReg)  in splitScalar64BitBCNT()
    2803  MRI.replaceRegWith(Dest.getReg(), ResultReg);  in splitScalar64BitBCNT()
    2807  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);  in splitScalar64BitBCNT()
    2833  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);  in splitScalar64BitBFE() [local]
    2844  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)  in splitScalar64BitBFE()
    [all …]
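lowerScalarAbs() (lines 2633-2644) rewrites a scalar abs as max(x, 0 - x) on the vector ALU, then retargets every use of the old SALU def and queues those users for further legalization. A sketch under that reading; TmpReg and the V_SUB opcode are assumptions, the V_MAX/replaceRegWith/worklist lines mirror the listing.

    unsigned TmpReg    = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);  // assumed
    unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)   // TmpReg = 0 - x (opcode assumed)
        .addImm(0)
        .addReg(Src.getReg());
    BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
        .addReg(Src.getReg())
        .addReg(TmpReg);                                        // ResultReg = max(x, -x)

    MRI.replaceRegWith(Dest.getReg(), ResultReg);               // rewrite uses of the old def
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);     // keep legalizing the users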
/external/llvm/include/llvm/CodeGen/
D | FastISel.h
    76   unsigned ResultReg;  [member]
    92   ResultReg(0), NumResultRegs(0), IsPatchPoint(false) {}  in CallLoweringInfo()