//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// \brief Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }
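
  // A rough illustration of how these buckets are meant to be read (the IR
  // below is hypothetical and assumes a 64-bit target with legal i64):
  //
  //   %p = inttoptr i64 %x to i8*   ; TCC_Free: no code is expected after
  //                                 ; lowering.
  //   %q = udiv i32 %a, %b          ; TCC_Expensive: division is assumed to
  //                                 ; cost several basic operations.
  //   %r = add i32 %a, %b           ; TCC_Basic: the default bucket.
  //
  // The numeric weights behind these enumerators live in the
  // TargetCostConstants enum in TargetTransformInfo.h.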

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLoweredToCall(const Function *F) {
    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }
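
  // Sketch of how the call-related hooks above compose (the function names
  // are only examples): for a direct call such as
  //
  //   %r = call float @sqrtf(float %x)
  //
  // isLoweredToCall returns false, so the CRTP-layer getCallCost below charges
  // a single TCC_Basic rather than the per-argument estimate, whereas a call
  // to an unknown external function taking three arguments falls back to
  // getCallCost(FTy, 3) == 4 * TCC_Basic.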

  void getUnrollingPreferences(Loop *, TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }
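
  // To make the default addressing-mode guess concrete (illustrative calls,
  // not part of the interface contract):
  //
  //   // [reg] or [reg + reg]: accepted, so getScalingFactorCost returns 0.
  //   isLegalAddressingMode(Ty, /*BaseGV=*/nullptr, /*BaseOffset=*/0,
  //                         /*HasBaseReg=*/true, /*Scale=*/1, AS);
  //
  //   // [globalvar + 16]: rejected by the guess above, so the scaling-factor
  //   // cost is reported as -1 ("not free").
  //   isLegalAddressingMode(Ty, GV, /*BaseOffset=*/16, /*HasBaseReg=*/false,
  //                         /*Scale=*/0, AS);
  //
  // Targets with richer addressing modes are expected to override both hooks.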

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  bool enableInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) { return 128; }

  unsigned getCacheLineSize() { return 0; }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo) {
    return 1;
  }
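
  // Note: getArithmeticInstrCost above and the shuffle/cast/memory hooks that
  // follow are the vectorizer-facing cost queries. The conservative default
  // is a flat cost of 1 per operation, independent of type or vectorization
  // factor, so under these defaults a load + add + store sequence costs 3
  // whether it is scalar or <4 x i32>. Real targets override them with type-
  // and ISA-aware values (see BasicTTIImpl.h and the per-target TTI
  // implementations).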

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, bool) { return 0; }

  unsigned getReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }
};
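
// A sketch of how backends typically consume this machinery ("MyTarget" is a
// placeholder, not an in-tree backend): rather than deriving from
// TargetTransformInfoImplBase directly, a target usually derives from
// BasicTTIImplBase<T> (llvm/CodeGen/BasicTTIImpl.h), which layers on top of
// the CRTP base declared below, and overrides only the hooks it has better
// information for:
//
//   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
//     typedef BasicTTIImplBase<MyTargetTTIImpl> BaseT;
//     const MyTargetSubtarget *ST;
//
//   public:
//     MyTargetTTIImpl(const MyTargetTargetMachine *TM, const Function &F)
//         : BaseT(TM, F.getParent()->getDataLayout()),
//           ST(TM->getSubtargetImpl(F)) {}
//
//     unsigned getNumberOfRegisters(bool Vector) { return Vector ? 32 : 16; }
//   };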

/// \brief CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplCRTPBase(const TargetTransformInfoImplCRTPBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  TargetTransformInfoImplCRTPBase(TargetTransformInfoImplCRTPBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}

  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    const GlobalValue *BaseGV = nullptr;
    if (Ptr != nullptr) {
      // TODO: will remove this when pointers have an opaque type.
      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
                 PointeeType &&
             "explicit pointee type doesn't match operand's pointee type");
      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    }
    bool HasBaseReg = (BaseGV == nullptr);
    int64_t BaseOffset = 0;
    int64_t Scale = 0;

    // Assumes the address space is 0 when Ptr is nullptr.
    unsigned AS =
        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
    auto GTI = gep_type_begin(PointeeType, AS, Operands);
    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (isa<SequentialType>(*GTI)) {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx)
          BaseOffset += ConstIdx->getSExtValue() * ElementSize;
        else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      } else {
        StructType *STy = cast<StructType>(*GTI);
        // For structures the index is always splat or scalar constant
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      }
    }

    if (static_cast<T *>(this)->isLegalAddressingMode(
            PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
            BaseOffset, HasBaseReg, Scale, AS)) {
      return TTI::TCC_Free;
    }
    return TTI::TCC_Basic;
  }
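
  // Worked example of the offset/scale accumulation above (illustrative IR;
  // whether the result is TCC_Free depends on the target's
  // isLegalAddressingMode):
  //
  //   %f = getelementptr {i32, [8 x i16]}, {i32, [8 x i16]}* %p,
  //                      i64 2, i32 1, i64 %i
  //
  //   index 0 (constant 2):      BaseOffset += 2 * 20 (struct alloc size)
  //   index 1 (struct field 1):  BaseOffset += 4 (field offset)
  //   index 2 (variable %i):     Scale = 2 (i16 alloc size)
  //
  // yielding a query for roughly [%p + 44 + 2 * reg], which the default base
  // implementation rejects (non-zero offset), so the GEP is charged TCC_Basic.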

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getUserCost(const User *U) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      SmallVector<Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
      return static_cast<T *>(this)->getGEPCost(
          GEP->getSourceElementType(), GEP->getPointerOperand(), Indices);
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // Result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // nop on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
};
}

#endif