//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

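// A rough sketch of the PPC constant-materialization sequences behind the
// cost tiers below (illustrative, not an exhaustive list of the sequences the
// backend may choose): a 16-bit immediate fits a single li/addi; a 32-bit
// immediate is a single lis when its low halfword is zero, otherwise lis+ori;
// a general 64-bit immediate may need a sequence such as
// lis+ori+rldicr+oris+ori, approximated here as 4 * TCC_Basic.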
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
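    // For example, 0x0FF0 and ~0x0FF0 are both (possibly inverted) shifted
    // masks, so an 'and' with either immediate can fold into a single
    // rlwinm-style rotate-and-mask; such immediates are treated as free
    // below.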
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
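    // For example, a <8 x i32> operation with only 128-bit Altivec registers
    // legalizes to two <4 x i32> operations, so LT.first == 2 and the base
    // cost is doubled.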
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites, when their callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely than concatenation unrolling alone.
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

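// With the load sizes registered below, a 16-byte memcmp would, for example,
// be expanded inline into two 8-byte loads per buffer plus compares rather
// than a libcall.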
const PPCTTIImpl::TTI::MemCmpExpansionOptions *
PPCTTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  static const auto Options = []() {
    TTI::MemCmpExpansionOptions Options;
    Options.LoadSizes.push_back(8);
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  return &Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fall back to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
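  // For example, a <8 x double> shuffle with 256-bit QPX registers legalizes
  // to two registers, so under this model LT.first == 2.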
  return LT.first;
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
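  // (For example, a scalar double load into a VSR can be a single lxsdx, and
  // with P8 vector support a 32-bit float load can be a single lxsspx.)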
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);
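  // (For example, a 16-byte access with 4-byte alignment is decomposed into
  // 16/4 = 4 accesses, i.e. 3 beyond the one already counted in Cost.)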

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
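  // For example, with Factor == 2 and a type that legalizes to two registers
  // (LT.first == 2), this adds 2 * (2 - 1) = 2 extra shuffles.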
  Cost += Factor * (LT.first - 1);

  return Cost;
}