//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

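// Automatic HVX vectorization is only considered when both the subtarget
// supports HVX operations and the -hexagon-autohvx flag is given.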
bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

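// A vector type qualifies for HVX if it is a simple integer vector type
// wider than 64 bits that is either a native HVX type already, or one that
// the target prefers to widen into an HVX vector.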
bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
  assert(VecTy->isVectorTy());
  // Avoid types like <2 x i32*>.
  if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
    return false;
  EVT VecVT = EVT::getEVT(VecTy);
  if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
    return false;
  if (ST.isHVXVectorType(VecVT.getSimpleVT()))
    return true;
  auto Action = TLI.getPreferredVectorAction(VecVT);
  return Action == TargetLoweringBase::TypeWidenVector;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (Ty->isVectorTy())
    return Ty->getVectorNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
  // Only try to peel innermost loops with small runtime trip counts.
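  // A zero result from getSmallConstantTripCount means the exact count is
  // not known at compile time, while a small non-zero maximum trip count
  // guarantees the loop is short.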
  if (L && L->empty() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    UP.PeelCount = 2;
  }
}

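// Hexagon provides post-incrementing addressing modes on its loads and
// stores, so address formation should favor post-increment forms.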
bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---

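// Hexagon has 32 general purpose registers, and 32 HVX vector registers
// when HVX auto-vectorization is enabled.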
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 0;
}

unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength()*8 : 0;
}

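// The minimum vectorization factor is the number of elements of the given
// width that fit in one HVX vector register: with 64-byte vectors and
// 16-bit elements, for example, that is 512/16 = 32 lanes.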
unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
  return (8 * ST.getVectorLength()) / ElemWidth;
}

unsigned HexagonTTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
      bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys) {
  return BaseT::getCallInstrCost(F, RetTy, Tys);
}

unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Value*> Args, FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Type*> Tys, FastMathFlags FMF,
      unsigned ScalarizationCostPassed) {
  if (ID == Intrinsic::bswap) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, RetTy);
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
  return 0;
}

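// Loads are modeled in three tiers: whole HVX loads cost one unit per
// vector register covered, HVX vectors assembled from scalar memory cost
// three units per constituent load, and non-HVX vector loads pay for the
// number of maximally-aligned pieces, scaled up for floating point element
// types and for sub-word alignments.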
unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
      unsigned Alignment, unsigned AddressSpace, const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getBitWidth();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      Alignment = std::min(Alignment, RegWidth/8);
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      unsigned AlignWidth = 8 * std::max(1u, Alignment);
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3*NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost = VecTy->getElementType()->isFloatingPointTy() ? FloatFactor
                                                                 : 1;
    Alignment = std::min(Alignment, 8u);
    unsigned AlignWidth = 8 * std::max(1u, Alignment);
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == 4 || Alignment == 8)
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
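    // With byte alignment (LogA = 0) each load is charged a factor of 3,
    // with halfword alignment (LogA = 1) a factor of 2.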
    unsigned LogA = Log2_32(Alignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
}

unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
      Type *Src, unsigned Alignment, unsigned AddressSpace) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}

unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
  return 1;
}

unsigned HexagonTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
      Value *Ptr, bool VariableMask, unsigned Alignment) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
}

unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
      Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      unsigned Alignment, unsigned AddressSpace) {
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

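// Floating point compares on vectors are comparatively expensive; charge
// FloatFactor per element on top of the type legalization cost.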
unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
      Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

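// As with compares, vector arithmetic on floating point types is charged
// FloatFactor per element once the type has been legalized.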
unsigned HexagonTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
      TTI::OperandValueProperties Opd1PropInfo,
      TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value*> Args) {
  if (Ty->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}

unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
      Type *SrcTy, const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    return std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
  }
  return 1;
}

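// Inserting into a non-zero lane requires rotating the vector before and
// after the access; inserts of non-32-bit elements additionally pay the
// cost of an extract.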
unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
      unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

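// A cast is free when it can be folded into the load feeding it: Hexagon's
// sub-word loads extend their result to 32 bits as part of the load itself,
// so e.g. a zext of an i16 load to i32 costs nothing extra.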
int HexagonTTIImpl::getUserCost(const User *U,
                                ArrayRef<const Value *> Operands) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}