//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the cost of the
  // overflow and memory checks.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
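  // LT.first is the number of legal-width pieces the type splits into and
  // LT.second is the legalized MVT; table costs below are scaled by LT.first.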

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a uniform power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL, MVT::v16i32, 1 },
    { ISD::SRL, MVT::v16i32, 1 },
    { ISD::SRA, MVT::v16i32, 1 },
    { ISD::SHL, MVT::v8i64, 1 },
    { ISD::SRL, MVT::v8i64, 1 },
    { ISD::SRA, MVT::v8i64, 1 },
  };

  if (ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we mark them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require a negation beforehand.
    { ISD::SHL, MVT::v16i8, 1 },
    { ISD::SRL, MVT::v16i8, 2 },
    { ISD::SRA, MVT::v16i8, 2 },
    { ISD::SHL, MVT::v8i16, 1 },
    { ISD::SRL, MVT::v8i16, 2 },
    { ISD::SRA, MVT::v8i16, 2 },
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 2 },
    { ISD::SRA, MVT::v4i32, 2 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 2 },
    { ISD::SRA, MVT::v2i64, 2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8, 2 },
    { ISD::SRL, MVT::v32i8, 4 },
    { ISD::SRA, MVT::v32i8, 4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32, 2 },
    { ISD::SRL, MVT::v8i32, 4 },
    { ISD::SRA, MVT::v8i32, 4 },
    { ISD::SHL, MVT::v4i64, 2 },
    { ISD::SRL, MVT::v4i64, 4 },
    { ISD::SRA, MVT::v4i64, 4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2CustomCostTable[] = {
    { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
    { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
    { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
    { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
    { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8, 32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32, 8*20 },
    { ISD::SDIV, MVT::v4i64, 4*20 },
    { ISD::UDIV, MVT::v32i8, 32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32, 8*20 },
    { ISD::UDIV, MVT::v4i64, 4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i8, 1 }, // psllw.
    { ISD::SHL, MVT::v32i8, 2 }, // psllw.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v16i16, 2 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld.
    { ISD::SHL, MVT::v8i32, 2 }, // pslld.
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.
    { ISD::SHL, MVT::v4i64, 2 }, // psllq.

    { ISD::SRL, MVT::v16i8, 1 }, // psrlw.
    { ISD::SRL, MVT::v32i8, 2 }, // psrlw.
    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v8i32, 2 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
    { ISD::SRL, MVT::v4i64, 2 }, // psrlq.

    { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA, MVT::v32i8, 8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v16i16, 2 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
    { ISD::SRA, MVT::v8i32, 2 }, // psrad.
    { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle.
    { ISD::SRA, MVT::v4i64, 8 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // With SSE4.1, a v4i32 signed divide by a uniform constant can use the
    // shorter pmuldq sequence instead.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by a non-uniform constant is converted into a
    // vector multiply; the new multiply is eventually lowered into a
    // sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
    { ISD::SHL, MVT::v32i8, 2*26 }, // cmpgtb sequence.
    { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
    { ISD::SHL, MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
    { ISD::SHL, MVT::v8i32, 2*2*5 }, // We optimized this using mul.
    { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
    { ISD::SHL, MVT::v4i64, 2*4 }, // splat+shuffle sequence.

    { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
    { ISD::SRL, MVT::v32i8, 2*26 }, // cmpgtb sequence.
    { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
    { ISD::SRL, MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v8i32, 2*16 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
    { ISD::SRL, MVT::v4i64, 2*4 }, // splat+shuffle sequence.

    { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
    { ISD::SRA, MVT::v32i8, 2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
    { ISD::SRA, MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v8i32, 2*16 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence.
    { ISD::SRA, MVT::v4i64, 2*12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide
    // "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16, 8*20 },
    { ISD::SDIV, MVT::v4i32, 4*20 },
    { ISD::SDIV, MVT::v2i64, 2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16, 8*20 },
    { ISD::UDIV, MVT::v4i32, 4*20 },
    { ISD::UDIV, MVT::v2i64, 2*20 },
  };

  if (ST->hasSSE2()) {
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16, 4 },
    { ISD::MUL, MVT::v8i32, 4 },
    { ISD::SUB, MVT::v8i32, 4 },
    { ISD::ADD, MVT::v8i32, 4 },
    { ISD::SUB, MVT::v4i64, 4 },
    { ISD::ADD, MVT::v4i64, 4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL, MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
      return LT.first * Entry->Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
    return LT.first * Entry->Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3: lower a v4i32 mul as
  // 2x shuffle, 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps

      // This shuffle is custom lowered into a sequence of:
      // 2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      // 2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVXAltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}  // pshufb + pshufb + or
    };

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    if (const auto *Entry = CostTableLookup(SSEAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types of
  // potential massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },

    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },

    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },

    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 },

    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },

    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },

    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },

    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },

    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

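  // Note: this SSE2-only early lookup keys on the *legalized* types and scales
  // by the source split count, while the feature-gated lookups further below
  // require the original types to be simple MVTs and return the raw table cost.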
  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64, 1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64, 1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF) {
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 4 },
    { ISD::BITREVERSE, MVT::v8i32, 4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8, 4 },
    { ISD::BITREVERSE, MVT::v2i64, 1 },
    { ISD::BITREVERSE, MVT::v4i32, 1 },
    { ISD::BITREVERSE, MVT::v8i16, 1 },
    { ISD::BITREVERSE, MVT::v16i8, 1 },
    { ISD::BITREVERSE, MVT::i64, 3 },
    { ISD::BITREVERSE, MVT::i32, 3 },
    { ISD::BITREVERSE, MVT::i16, 3 },
    { ISD::BITREVERSE, MVT::i8, 3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 5 },
    { ISD::BITREVERSE, MVT::v8i32, 5 },
    { ISD::BITREVERSE, MVT::v16i16, 5 },
    { ISD::BITREVERSE, MVT::v32i8, 5 },
    { ISD::BSWAP, MVT::v4i64, 1 },
    { ISD::BSWAP, MVT::v8i32, 1 },
    { ISD::BSWAP, MVT::v16i16, 1 }
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 10 },
    { ISD::BITREVERSE, MVT::v8i32, 10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8, 10 },
    { ISD::BSWAP, MVT::v4i64, 4 },
    { ISD::BSWAP, MVT::v8i32, 4 },
    { ISD::BSWAP, MVT::v16i16, 4 }
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 5 },
    { ISD::BITREVERSE, MVT::v4i32, 5 },
    { ISD::BITREVERSE, MVT::v8i16, 5 },
    { ISD::BITREVERSE, MVT::v16i8, 5 },
    { ISD::BSWAP, MVT::v2i64, 1 },
    { ISD::BSWAP, MVT::v4i32, 1 },
    { ISD::BSWAP, MVT::v8i16, 1 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64, 7 },
    { ISD::BSWAP, MVT::v4i32, 7 },
    { ISD::BSWAP, MVT::v8i16, 7 }
  };

  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to lookup cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For scalar types, take the regular (unmasked) memory-op cost.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
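    // Scalarized cost = unpack the mask + (compare + branch) per element +
    // per-element scalar memory ops + insert/extract of the value vector.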
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for the
    // mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v8i16, 5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD, MVT::v8i16, 5 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD, MVT::v4i64, 3 },
    { ISD::ADD, MVT::v8i16, 4 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
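  // For example, an i128 immediate is evaluated as two 64-bit chunks; an
  // all-zero chunk is free and a chunk that fits in a sign-extended 32-bit
  // value costs a single basic unit.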
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

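  // If the constant occupies the operand slot the instruction can encode as an
  // immediate, and materializing it would take no more than one instruction
  // per 64-bit chunk, report it as free so constant hoisting leaves it alone.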
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
1433
/// Return an average cost of a Gather / Scatter instruction; this estimate may
/// be refined later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bits (the default for GEP) to 32.
  // This is essential for VF 16: if the index can't be reduced to 32 bits, the
  // operation will use 16 x 64-bit indices, which do not fit in a single zmm
  // register and force the operation to be split. Also check that the base
  // pointer is the same for all lanes, and that there's at most one variable
  // index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();
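  // For example (illustrative): a v16f32 gather whose GEP has a uniform base
  // pointer and a single sign-extended i32 induction-variable index can use a
  // v16i32 index vector, which fits in one zmm register; with 64-bit indices
  // the v16i64 index vector needs two zmm registers and the gather is split.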

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
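  // Illustrative example (not a measured figure): with a unit-cost scalar
  // element load, a 16-lane gather comes out to 2 + 16 * 1 = 18, i.e. the
  // estimate grows linearly with VF on top of a small fixed overhead.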
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - Address space of the pointer(s).
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load) {
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  } else {
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
  }

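  // The scalarized estimate is the sum of the three components above.
  // Sketch (assuming unit-cost scalar loads and inserts): a masked v8f32
  // gather with a variable mask costs roughly 8 loads + 8 inserts plus the
  // per-lane compare-and-branch overhead from unpacking the mask.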
  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vectors of 2 elements is not profitable on KNL / SKX.
  // A 4-element gather/scatter instruction does not exist on KNL. We could
  // extend it to 8 elements, but zeroing the upper bits of the mask vector
  // would add more instructions. Right now we give vector-4 the scalar cost on
  // KNL. TODO: Check whether the gather/scatter instruction is better in the
  // VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

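/// A masked load is treated as legal when the scalar element type is at least
/// 32 bits wide and the target has AVX, or at least 8 bits wide and the target
/// has AVX-512 BWI; pointer elements are measured by the pointer size.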
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return (DataWidth >= 32 && ST->hasAVX()) ||
         (DataWidth >= 8 && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return DataWidth >= 32 && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
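  // In other words, the callee's features must be a subset of the caller's:
  // e.g. a callee built for AVX-512F cannot be inlined into a caller that only
  // enables AVX2, while the reverse works since AVX-512F implies the AVX2
  // feature bits.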
  return (CallerBits & CalleeBits) == CalleeBits;
}