/external/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
D | IVDescriptors.cpp |
    224  InstDesc ReduxDesc(false, nullptr); in AddReductionVar()
    369  InstDesc IgnoredVal(false, nullptr); in AddReductionVar()
    459  RecurrenceDescriptor::InstDesc
    460  RecurrenceDescriptor::isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev) { in isMinMaxSelectCmpPattern()
    471  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    472  return InstDesc(Select, Prev.getMinMaxKind()); in isMinMaxSelectCmpPattern()
    477  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    480  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    482  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    489  return InstDesc(Select, MRK_UIntMin); in isMinMaxSelectCmpPattern()
    [all …]
|
/external/llvm-project/llvm/lib/Analysis/ |
D | IVDescriptors.cpp |
    224  InstDesc ReduxDesc(false, nullptr); in AddReductionVar()
    369  InstDesc IgnoredVal(false, nullptr); in AddReductionVar()
    461  RecurrenceDescriptor::InstDesc
    462  RecurrenceDescriptor::isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev) { in isMinMaxSelectCmpPattern()
    473  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    474  return InstDesc(Select, Prev.getMinMaxKind()); in isMinMaxSelectCmpPattern()
    479  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    482  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    484  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    491  return InstDesc(Select, MRK_UIntMin); in isMinMaxSelectCmpPattern()
    [all …]
|
/external/llvm/lib/Transforms/Utils/ |
D | LoopUtils.cpp |
    194  InstDesc ReduxDesc(false, nullptr); in AddReductionVar()
    318  InstDesc IgnoredVal(false, nullptr); in AddReductionVar()
    372  RecurrenceDescriptor::InstDesc
    373  RecurrenceDescriptor::isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev) { in isMinMaxSelectCmpPattern()
    384  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    385  return InstDesc(Select, Prev.getMinMaxKind()); in isMinMaxSelectCmpPattern()
    390  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    393  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    395  return InstDesc(false, I); in isMinMaxSelectCmpPattern()
    402  return InstDesc(Select, MRK_UIntMin); in isMinMaxSelectCmpPattern()
    [all …]
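
All three copies above trace the same routine: isMinMaxSelectCmpPattern() inspects a cmp/select pair and reports the verdict as an InstDesc. The following is a minimal sketch of that shape, written against the LLVM 10 headers listed further down; only the quoted return statements are taken from the hits, while the control flow in between is reconstructed and the predicate classification is abbreviated to the unsigned-min case, so treat it as an illustration rather than the in-tree implementation.

    #include "llvm/Analysis/IVDescriptors.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;
    using InstDesc = RecurrenceDescriptor::InstDesc;

    static InstDesc classifyMinMaxPair(Instruction *I, InstDesc &Prev) {
      // A min/max recurrence appears as an icmp/fcmp feeding a select.
      // A lone compare only qualifies if its single user is a select.
      if (auto *Cmp = dyn_cast<CmpInst>(I)) {
        if (!Cmp->hasOneUse() || !isa<SelectInst>(*Cmp->user_begin()))
          return InstDesc(false, I);          // not part of a min/max
        Instruction *Select = cast<Instruction>(*Cmp->user_begin());
        return InstDesc(Select, Prev.getMinMaxKind()); // keep prior kind
      }
      auto *Sel = dyn_cast<SelectInst>(I);
      if (!Sel)
        return InstDesc(false, I);
      auto *Cmp = dyn_cast<ICmpInst>(Sel->getCondition());
      if (!Cmp)
        return InstDesc(false, I);
      // Classify the predicate; only unsigned-min is spelled out here.
      if (Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
          Sel->getTrueValue() == Cmp->getOperand(0))
        return InstDesc(Sel, RecurrenceDescriptor::MRK_UIntMin);
      return InstDesc(false, I);
    }

Note the design visible in the hits: a failed match still returns an InstDesc carrying the offending instruction, so the caller in AddReductionVar() can report exactly where the reduction chain broke.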
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/ |
D | IVDescriptors.h |
    102  class InstDesc {
    104  InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
    108  InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
    139  static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
    140  InstDesc &Prev, bool HasFunNoNaNAttr);
    153  static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
    157  static InstDesc isConditionalRdxPattern(RecurrenceKind Kind, Instruction *I);
|
/external/llvm-project/llvm/include/llvm/Analysis/ |
D | IVDescriptors.h |
    89   class InstDesc {
    91   InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
    95   InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
    126  static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
    127  InstDesc &Prev, bool HasFunNoNaNAttr);
    140  static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
    144  static InstDesc isConditionalRdxPattern(RecurrenceKind Kind, Instruction *I);
|
/external/llvm/include/llvm/Transforms/Utils/ |
D | LoopUtils.h |
    104  class InstDesc {
    107  InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
    111  InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
    142  static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
    143  InstDesc &Prev, bool HasFunNoNaNAttr);
    155  static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
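
The three headers above declare the same helper with two constructors: one for a plain recurrence step and one for a matched min/max step. A reconstruction of its shape from the quoted declarations, assuming the LLVM 10 layout; the class nests inside RecurrenceDescriptor in the real headers, and the member names beyond the constructor parameters are inferred rather than quoted.

    // Instruction and MinMaxRecurrenceKind come from the surrounding
    // LLVM declarations; this is a sketch of the nested helper only.
    class InstDesc {
    public:
      // A plain recurrence step: records whether I continues the
      // recurrence, plus an optional unsafe-algebra instruction (UAI)
      // that would block floating-point reassociation.
      InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
          : IsRecurrence(IsRecur), PatternLastInst(I),
            MinMaxKind(MRK_Invalid), UnsafeAlgebraInst(UAI) {}

      // A min/max step: always a recurrence, tagged with the matched kind.
      InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
          : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
            UnsafeAlgebraInst(UAI) {}

      bool isRecurrence() const { return IsRecurrence; }
      MinMaxRecurrenceKind getMinMaxKind() const { return MinMaxKind; }

    private:
      bool IsRecurrence;                // does this step extend the chain?
      Instruction *PatternLastInst;     // last instruction of the pattern
      MinMaxRecurrenceKind MinMaxKind;  // MRK_Invalid for non-min/max steps
      Instruction *UnsafeAlgebraInst;   // nullptr when the step is safe
    };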
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/MCTargetDesc/ |
D | X86AsmBackend.cpp |
    332  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode()); in isMacroFused() local
    333  if (!InstDesc.isConditionalBranch()) in isMacroFused()
    387  const MCInstrDesc &InstDesc = MCII->get(Inst.getOpcode()); in needAlignInst() local
    388  return (InstDesc.isConditionalBranch() && in needAlignInst()
    390  (InstDesc.isUnconditionalBranch() && in needAlignInst()
    392  (InstDesc.isCall() && in needAlignInst()
    394  (InstDesc.isReturn() && in needAlignInst()
    396  (InstDesc.isIndirectBranch() && in needAlignInst()
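
Both X86AsmBackend copies use MCInstrDesc the same way: fetch the descriptor for an opcode, then branch on its flag queries. Here is a minimal sketch of the needAlignInst() shape; the flag getters are the real MCInstrDesc API, but the AlignMask encoding is a hypothetical stand-in for the backend's actual -x86-align-branch option handling.

    #include "llvm/MC/MCInst.h"
    #include "llvm/MC/MCInstrDesc.h"
    #include "llvm/MC/MCInstrInfo.h"
    using namespace llvm;

    static bool needsBranchAlignment(const MCInstrInfo &MCII,
                                     const MCInst &Inst, unsigned AlignMask) {
      // Hypothetical bit assignments standing in for the real option.
      enum { AlignJcc = 1, AlignJmp = 2, AlignCall = 4,
             AlignRet = 8, AlignIndirect = 16 };
      const MCInstrDesc &InstDesc = MCII.get(Inst.getOpcode());
      return (InstDesc.isConditionalBranch() && (AlignMask & AlignJcc)) ||
             (InstDesc.isUnconditionalBranch() && (AlignMask & AlignJmp)) ||
             (InstDesc.isCall() && (AlignMask & AlignCall)) ||
             (InstDesc.isReturn() && (AlignMask & AlignRet)) ||
             (InstDesc.isIndirectBranch() && (AlignMask & AlignIndirect));
    }

The isMacroFused() hits apply the same idea one step earlier: a Jcc candidate is rejected outright unless its descriptor reports isConditionalBranch().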
|
/external/llvm-project/llvm/lib/Target/AArch64/ |
D | AArch64SIMDInstrOpt.cpp |
    160  bool shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
    218  shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc, in shouldReplaceInst() argument
    223  auto InstID = std::make_pair(InstDesc->getOpcode(), Subtarget); in shouldReplaceInst()
    228  unsigned SCIdx = InstDesc->getSchedClass(); in shouldReplaceInst()
    256  if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost) in shouldReplaceInst()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64SIMDInstrOpt.cpp |
    160  bool shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
    218  shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc, in shouldReplaceInst() argument
    223  auto InstID = std::make_pair(InstDesc->getOpcode(), Subtarget); in shouldReplaceInst()
    227  unsigned SCIdx = InstDesc->getSchedClass(); in shouldReplaceInst()
    255  if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost) in shouldReplaceInst()
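
The shouldReplaceInst() hits in both copies reduce to one cost test: replace an instruction only if the original's latency exceeds the combined latency of its replacements. A sketch of just that decision, assuming hypothetical shouldReplace/ReplOpcodes names; TargetSchedModel::computeInstrLatency() is the real API, while the per-(opcode, subtarget) InstID caching visible in the hits is omitted.

    #include "llvm/CodeGen/TargetSchedule.h"
    #include "llvm/MC/MCInstrDesc.h"
    #include <vector>
    using namespace llvm;

    static bool shouldReplace(const TargetSchedModel &SchedModel,
                              const MCInstrDesc *InstDesc,
                              const std::vector<unsigned> &ReplOpcodes) {
      unsigned ReplCost = 0;
      for (unsigned Opc : ReplOpcodes)   // accumulate replacement latency
        ReplCost += SchedModel.computeInstrLatency(Opc);
      // Profitable only if the original is strictly slower than the
      // full replacement sequence.
      return SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost;
    }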
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    2005  const MCInstrDesc &InstDesc = MI.getDesc(); in isOperandLegal() local
    2006  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; in isOperandLegal()
    2027  } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { in isOperandLegal()
    2708  const MCInstrDesc &InstDesc = get(Opcode); in splitScalar64BitUnaryOp() local
    2723  BuildMI(MBB, MII, DL, InstDesc, DestSub0) in splitScalar64BitUnaryOp()
    2730  BuildMI(MBB, MII, DL, InstDesc, DestSub1) in splitScalar64BitUnaryOp()
    2762  const MCInstrDesc &InstDesc = get(Opcode); in splitScalar64BitBinaryOp() local
    2784  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) in splitScalar64BitBinaryOp()
    2794  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) in splitScalar64BitBinaryOp()
    2827  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); in splitScalar64BitBCNT() local
    [all …]
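
The splitScalar64BitUnaryOp()/splitScalar64BitBinaryOp() hits show a second recurring pattern: the MCInstrDesc is fetched once via get(Opcode) and reused to emit both 32-bit halves of a 64-bit scalar operation. A sketch under that reading, with a hypothetical emitSplitHalves helper; the subregister extraction and the REG_SEQUENCE that rejoins the halves are elided.

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/Register.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/IR/DebugLoc.h"
    using namespace llvm;

    static void emitSplitHalves(const TargetInstrInfo &TII, unsigned Opcode,
                                MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MII,
                                const DebugLoc &DL, Register DestSub0,
                                Register DestSub1, Register SrcSub0,
                                Register SrcSub1) {
      // One descriptor lookup, shared by both halves.
      const MCInstrDesc &InstDesc = TII.get(Opcode);
      BuildMI(MBB, MII, DL, InstDesc, DestSub0).addReg(SrcSub0); // lo half
      BuildMI(MBB, MII, DL, InstDesc, DestSub1).addReg(SrcSub1); // hi half
    }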
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    2935  const MCInstrDesc &InstDesc = MI.getDesc(); in isImmOperandLegal() local
    2936  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; in isImmOperandLegal()
    2960  if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) in isImmOperandLegal()
    3962  const MCInstrDesc &InstDesc = MI.getDesc(); in isOperandLegal() local
    3963  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; in isOperandLegal()
    3987  usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { in isOperandLegal()
    3992  } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { in isOperandLegal()
    3995  } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && in isOperandLegal()
    3996  isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { in isOperandLegal()
    5307  const MCInstrDesc &InstDesc = get(Opcode); in splitScalar64BitUnaryOp() local
    [all …]
|
D | SIFoldOperands.cpp |
    434  const MCInstrDesc &InstDesc = MI->getDesc(); in tryAddToFoldList() local
    435  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; in tryAddToFoldList()
    443  for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) { in tryAddToFoldList()
|
/external/llvm-project/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    3383  const MCInstrDesc &InstDesc = MI.getDesc(); in isImmOperandLegal() local
    3384  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; in isImmOperandLegal()
    3405  if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) in isImmOperandLegal()
    4452  const MCInstrDesc &InstDesc = MI.getDesc(); in isOperandLegal() local
    4453  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; in isOperandLegal()
    4476  usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { in isOperandLegal()
    4481  } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { in isOperandLegal()
    4484  } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && in isOperandLegal()
    4485  isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { in isOperandLegal()
    6039  const MCInstrDesc &InstDesc = get(Opcode); in splitScalar64BitUnaryOp() local
    [all …]
|
D | SIFoldOperands.cpp |
    441  const MCInstrDesc &InstDesc = MI->getDesc(); in tryAddToFoldList() local
    442  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; in tryAddToFoldList()
    450  for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) { in tryAddToFoldList()
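
The isOperandLegal(), isImmOperandLegal(), and tryAddToFoldList() hits all share one idiom: index MCInstrDesc::OpInfo by operand number and dispatch on the entry's OperandType. A sketch of that walk using only the target-independent side of the API; the AMDGPU-specific checks quoted above (OPERAND_KIMM32, usesConstantBus, isSISrcOperand) sit behind the OPERAND_FIRST_TARGET boundary shown here.

    #include "llvm/MC/MCInstrDesc.h"
    using namespace llvm;

    static bool isImmOperandSlot(const MCInstrDesc &InstDesc, unsigned OpIdx) {
      if (OpIdx >= InstDesc.getNumOperands())
        return false;                       // no static operand info
      const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
      // Target-independent operand kinds sit below OPERAND_FIRST_TARGET;
      // target enumerators such as AMDGPU's OPERAND_KIMM32 sit above it.
      return OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE ||
             OpInfo.OperandType >= MCOI::OPERAND_FIRST_TARGET;
    }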
|
/external/llvm-project/llvm/lib/Target/X86/MCTargetDesc/ |
D | X86AsmBackend.cpp |
    435  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode()); in isMacroFused() local
    436  if (!InstDesc.isConditionalBranch()) in isMacroFused()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/AsmParser/ |
D | AMDGPUAsmParser.cpp |
    1701  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode()); in addLiteralImmOperand() local
    1704  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum)); in addLiteralImmOperand()
    1707  assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum)); in addLiteralImmOperand()
    1708  const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum); in addLiteralImmOperand()
    1713  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType; in addLiteralImmOperand()
    1728  if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand in addLiteralImmOperand()
|
/external/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/ |
D | AMDGPUAsmParser.cpp |
    1813  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode()); in addLiteralImmOperand() local
    1816  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum)); in addLiteralImmOperand()
    1819  assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum)); in addLiteralImmOperand()
    1820  const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum); in addLiteralImmOperand()
    1825  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType; in addLiteralImmOperand()
    1841  if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand in addLiteralImmOperand()
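
Both AMDGPUAsmParser copies make the same sizing decision when encoding a literal immediate: a floating-point literal is parsed into a double first (and only narrowed later), while an integer literal takes whatever width the operand descriptor dictates. A sketch of just that step; getOperandSize is the AMDGPU helper quoted in the hits, and the in-tree relative include path plus the literalSizeInBytes wrapper name are assumptions.

    #include "Utils/AMDGPUBaseInfo.h"   // assumed in-tree path; AMDGPU::getOperandSize
    #include "llvm/MC/MCInstrDesc.h"
    #include <cstdint>
    using namespace llvm;

    static unsigned literalSizeInBytes(const MCInstrDesc &InstDesc,
                                       unsigned OpNum, bool IsFPImm) {
      // FP literals are held as a full double until the operand type is
      // known; integer literals use the descriptor-declared width.
      const unsigned Size =
          IsFPImm ? sizeof(double) : AMDGPU::getOperandSize(InstDesc, OpNum);
      // The operand type then steers the final encoding (dispatch elided).
      uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
      (void)OpTy;
      return Size;
    }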
|