Searched refs:UseOp (Results 1 – 5 of 5) sorted by relevance
/external/llvm/lib/Target/AMDGPU/
SIFoldOperands.cpp
    205  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);  in foldOperand() local
    208  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||  in foldOperand()
    209  UseOp.isImplicit())) {  in foldOperand()
    217  unsigned UseReg = UseOp.getReg();  in foldOperand()
    230  if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {  in foldOperand()
    234  if (UseOp.getSubReg() == AMDGPU::sub0) {  in foldOperand()
    237  assert(UseOp.getSubReg() == AMDGPU::sub1);  in foldOperand()
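The SIFoldOperands.cpp hits show foldOperand() bailing out when the use operand is implicit, or carries a subregister index while the operand being folded is itself a register, with 64-bit folds later split across sub0/sub1. A minimal, LLVM-free sketch of that guard, assuming simplified stand-in types (Operand and canFoldInto() are illustrative, not LLVM API):

    #include <cassert>

    struct Operand {
      bool IsReg;        // operand is a register
      bool IsImplicit;   // operand is an implicit use
      unsigned SubReg;   // non-zero: a subregister index is present
    };

    // Mirrors the early bail-out at SIFoldOperands.cpp:208-209.
    bool canFoldInto(const Operand &UseOp, const Operand &OpToFold) {
      if (UseOp.IsReg && ((UseOp.SubReg && OpToFold.IsReg) || UseOp.IsImplicit))
        return false;
      return true;
    }

    int main() {
      Operand ImplicitUse{true, true, 0};  // implicit register use
      Operand RegToFold{true, false, 0};   // register operand to fold
      assert(!canFoldInto(ImplicitUse, RegToFold));
      return 0;
    }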
/external/llvm/lib/CodeGen/
MachineTraceMetrics.cpp
    604  unsigned UseOp;  member
    606  DataDep(const MachineInstr *DefMI, unsigned DefOp, unsigned UseOp)  in DataDep()
    607  : DefMI(DefMI), DefOp(DefOp), UseOp(UseOp) {}  in DataDep()
    610  DataDep(const MachineRegisterInfo *MRI, unsigned VirtReg, unsigned UseOp)  in DataDep()
    611  : UseOp(UseOp) {  in DataDep()
    843  .computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI, Dep.UseOp);  in computeInstrDepths()
    932  UseMI, Dep.UseOp);  in pushDepHeight()
    1165  .computeOperandLatency(Dep.DefMI, Dep.DefOp, PHI, Dep.UseOp);  in getPHIDepth()
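The MachineTraceMetrics.cpp hits outline the DataDep record: a defining instruction and operand index paired with the using operand index, which the later hits feed to computeOperandLatency(). A rough reconstruction of that shape based only on the visible lines, with MachineInstr left as an opaque stand-in:

    struct MachineInstr;  // opaque stand-in for the real class

    struct DataDep {
      const MachineInstr *DefMI;  // instruction defining the value
      unsigned DefOp;             // operand index of the definition
      unsigned UseOp;             // operand index of the use (member at line 604)

      // Constructor visible at lines 606-607.
      DataDep(const MachineInstr *DefMI, unsigned DefOp, unsigned UseOp)
          : DefMI(DefMI), DefOp(DefOp), UseOp(UseOp) {}
    };

The second constructor at lines 610-611 only initializes UseOp in its init list, presumably because DefMI and DefOp are resolved inside the body from the virtual register's definition via MRI; that body is not shown in the hits.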
ScheduleDAGInstrs.cpp
    265  int UseOp = I->OpIdx;  in addPhysRegDataDeps() local
    268  if (UseOp < 0)  in addPhysRegDataDeps()
    279  UseOp));  in addPhysRegDataDeps()
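In the ScheduleDAGInstrs.cpp hits, UseOp is a signed copy of OpIdx, and the `if (UseOp < 0)` test treats a negative index as "not attached to a specific explicit operand". A tiny sketch of that sentinel convention only; the Use record and the latency values are made up for illustration:

    #include <cstdio>
    #include <vector>

    struct Use { int OpIdx; };  // negative index: no explicit operand attached

    int pickLatency(const Use &U) {
      const int DefaultLatency = 1;  // hypothetical fallback value
      int UseOp = U.OpIdx;
      if (UseOp < 0)
        return DefaultLatency;       // skip the operand-based path
      return 2;                      // pretend operand-based lookup
    }

    int main() {
      std::vector<Use> Uses = {{-1}, {0}, {3}};
      for (const Use &U : Uses)
        std::printf("OpIdx=%d -> latency %d\n", U.OpIdx, pickLatency(U));
      return 0;
    }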
/external/llvm/lib/Target/ARM/
ARMBaseInstrInfo.cpp
    4463  int UseOp = -1;  in getPartialRegUpdateClearance() local
    4475  UseOp = MI->findRegisterUseOperandIdx(Reg, false, TRI);  in getPartialRegUpdateClearance()
    4480  UseOp = 3;  in getPartialRegUpdateClearance()
    4488  if (UseOp != -1 && MI->getOperand(UseOp).readsReg())  in getPartialRegUpdateClearance()
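The ARMBaseInstrInfo.cpp hits in getPartialRegUpdateClearance() use UseOp as a "-1 means not found" sentinel: the index of the operand reading Reg is either looked up with findRegisterUseOperandIdx() or hard-coded for certain opcodes, and the final test only fires when that operand really reads the register. A simplified stand-alone analogue; Instr, Operand, and findUseIdx() are stand-ins, not LLVM API:

    #include <cassert>
    #include <vector>

    struct Operand { unsigned Reg; bool Reads; };
    struct Instr { std::vector<Operand> Ops; };

    // Stand-in for findRegisterUseOperandIdx(): index of the operand that
    // reads Reg, or -1 if there is none.
    int findUseIdx(const Instr &MI, unsigned Reg) {
      for (unsigned I = 0; I != MI.Ops.size(); ++I)
        if (MI.Ops[I].Reg == Reg && MI.Ops[I].Reads)
          return static_cast<int>(I);
      return -1;
    }

    int main() {
      Instr MI{{{1, false}, {2, true}}};
      int UseOp = findUseIdx(MI, 2);
      // Same shape as the guard at ARMBaseInstrInfo.cpp:4488.
      if (UseOp != -1 && MI.Ops[UseOp].Reads)
        assert(UseOp == 1);
      return 0;
    }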
/external/llvm/lib/CodeGen/SelectionDAG/
DAGCombiner.cpp
    5804  SDValue UseOp = User->getOperand(i);  in ExtendUsesToFormExtLoad() local
    5805  if (UseOp == N0)  in ExtendUsesToFormExtLoad()
    5807  if (!isa<ConstantSDNode>(UseOp))  in ExtendUsesToFormExtLoad()
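The DAGCombiner.cpp hits scan each operand of a user node inside ExtendUsesToFormExtLoad(): the operand that is the value being extended (N0) is skipped, and any other operand that is not a constant presumably blocks the transformation. A simplified sketch of that scan; Node and the function name below are stand-ins, not SelectionDAG API:

    #include <vector>

    struct Node { bool IsConstant; };

    // Returns true if every operand other than N0 is a constant, mirroring the
    // per-operand checks at DAGCombiner.cpp:5804-5807.
    bool otherOperandsAreConstant(const std::vector<const Node *> &Operands,
                                  const Node *N0) {
      for (const Node *UseOp : Operands) {
        if (UseOp == N0)
          continue;             // skip the value being extended
        if (!UseOp->IsConstant)
          return false;         // a non-constant sibling operand: give up
      }
      return true;
    }

    int main() {
      Node Load{false}, C{true};
      std::vector<const Node *> Ops = {&Load, &C};
      return otherOperandsAreConstant(Ops, &Load) ? 0 : 1;
    }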