1 //===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
41
42 #include "llvm/CodeGen/Analysis.h"
43 #include "llvm/ADT/Optional.h"
44 #include "llvm/ADT/Statistic.h"
45 #include "llvm/Analysis/BranchProbabilityInfo.h"
46 #include "llvm/Analysis/Loads.h"
47 #include "llvm/Analysis/TargetLibraryInfo.h"
48 #include "llvm/CodeGen/Analysis.h"
49 #include "llvm/CodeGen/FastISel.h"
50 #include "llvm/CodeGen/FunctionLoweringInfo.h"
51 #include "llvm/CodeGen/MachineFrameInfo.h"
52 #include "llvm/CodeGen/MachineInstrBuilder.h"
53 #include "llvm/CodeGen/MachineModuleInfo.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/StackMaps.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DebugInfo.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GlobalVariable.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/Operator.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/ErrorHandling.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Target/TargetInstrInfo.h"
67 #include "llvm/Target/TargetLowering.h"
68 #include "llvm/Target/TargetMachine.h"
69 #include "llvm/Target/TargetSubtargetInfo.h"
70 using namespace llvm;
71
72 #define DEBUG_TYPE "isel"
73
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
79
void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                           unsigned AttrIdx) {
82 IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
83 IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
84 IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
85 IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
86 IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
87 IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
88 IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
89 IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
90 Alignment = CS->getParamAlignment(AttrIdx);
91 }
92
93 /// Set the current block to which generated machine instructions will be
94 /// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
96 LocalValueMap.clear();
97
98 // Instructions are appended to FuncInfo.MBB. If the basic block already
99 // contains labels or copies, use the last instruction as the last local
100 // value.
101 EmitStartPt = nullptr;
102 if (!FuncInfo.MBB->empty())
103 EmitStartPt = &FuncInfo.MBB->back();
104 LastLocalValue = EmitStartPt;
105 }
106
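/// Lower the incoming function arguments using the target's fast argument
/// lowering hook, and record the resulting registers in ValueMap so they can
/// be used from non-entry blocks.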
bool FastISel::lowerArguments() {
108 if (!FuncInfo.CanLowerReturn)
109 // Fallback to SDISel argument lowering code to deal with sret pointer
110 // parameter.
111 return false;
112
113 if (!fastLowerArguments())
114 return false;
115
116 // Enter arguments into ValueMap for uses in non-entry BBs.
117 for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
118 E = FuncInfo.Fn->arg_end();
119 I != E; ++I) {
120 DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
121 assert(VI != LocalValueMap.end() && "Missed an argument?");
122 FuncInfo.ValueMap[I] = VI->second;
123 }
124 return true;
125 }
126
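/// Discard the map of locally materialized values, so previously emitted
/// constants are not reused, and reset the local-value insertion point.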
void FastISel::flushLocalValueMap() {
128 LocalValueMap.clear();
129 LastLocalValue = EmitStartPt;
130 recomputeInsertPt();
131 SavedInsertPt = FuncInfo.InsertPt;
132 }
133
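/// Return true if the instruction defining \p V has a single use, in the
/// same block, with no additional uses at the machine level, so its register
/// can safely be marked as killed at that use.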
bool FastISel::hasTrivialKill(const Value *V) {
135 // Don't consider constants or arguments to have trivial kills.
136 const Instruction *I = dyn_cast<Instruction>(V);
137 if (!I)
138 return false;
139
140 // No-op casts are trivially coalesced by fast-isel.
141 if (const auto *Cast = dyn_cast<CastInst>(I))
142 if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
143 !hasTrivialKill(Cast->getOperand(0)))
144 return false;
145
  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and now there is
  // more than one use at the Machine Instruction level.
149 unsigned Reg = lookUpRegForValue(V);
150 if (Reg && !MRI.use_empty(Reg))
151 return false;
152
153 // GEPs with all zero indices are trivially coalesced by fast-isel.
154 if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
155 if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
156 return false;
157
158 // Only instructions with a single use in the same basic block are considered
159 // to have trivial kills.
160 return I->hasOneUse() &&
161 !(I->getOpcode() == Instruction::BitCast ||
162 I->getOpcode() == Instruction::PtrToInt ||
163 I->getOpcode() == Instruction::IntToPtr) &&
164 cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
165 }
166
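/// Return the virtual register holding \p V, creating one (and materializing
/// constants in the local value area) if necessary. Returns 0 for types that
/// fast-isel cannot handle.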
unsigned FastISel::getRegForValue(const Value *V) {
168 EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
169 // Don't handle non-simple values in FastISel.
170 if (!RealVT.isSimple())
171 return 0;
172
173 // Ignore illegal types. We must do this before looking up the value
174 // in ValueMap because Arguments are given virtual registers regardless
175 // of whether FastISel can handle them.
176 MVT VT = RealVT.getSimpleVT();
177 if (!TLI.isTypeLegal(VT)) {
178 // Handle integer promotions, though, because they're common and easy.
179 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
180 VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
181 else
182 return 0;
183 }
184
185 // Look up the value to see if we already have a register for it.
186 unsigned Reg = lookUpRegForValue(V);
187 if (Reg)
188 return Reg;
189
190 // In bottom-up mode, just create the virtual register which will be used
191 // to hold the value. It will be materialized later.
192 if (isa<Instruction>(V) &&
193 (!isa<AllocaInst>(V) ||
194 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
195 return FuncInfo.InitializeRegForValue(V);
196
197 SavePoint SaveInsertPt = enterLocalValueArea();
198
199 // Materialize the value in a register. Emit any instructions in the
200 // local value area.
201 Reg = materializeRegForValue(V, VT);
202
203 leaveLocalValueArea(SaveInsertPt);
204
205 return Reg;
206 }
207
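/// Target-independent fallback for materializing a value into a register:
/// handles integer and floating-point constants, static alloca addresses,
/// null pointers, constant expressions, and undef.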
unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
209 unsigned Reg = 0;
210 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
211 if (CI->getValue().getActiveBits() <= 64)
212 Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
213 } else if (isa<AllocaInst>(V))
214 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
215 else if (isa<ConstantPointerNull>(V))
216 // Translate this as an integer zero so that it can be
217 // local-CSE'd with actual integer zeros.
218 Reg = getRegForValue(
219 Constant::getNullValue(DL.getIntPtrType(V->getContext())));
220 else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
221 if (CF->isNullValue())
222 Reg = fastMaterializeFloatZero(CF);
223 else
224 // Try to emit the constant directly.
225 Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
226
227 if (!Reg) {
228 // Try to emit the constant by using an integer constant with a cast.
229 const APFloat &Flt = CF->getValueAPF();
230 EVT IntVT = TLI.getPointerTy();
231
232 uint64_t x[2];
233 uint32_t IntBitWidth = IntVT.getSizeInBits();
234 bool isExact;
235 (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
236 APFloat::rmTowardZero, &isExact);
237 if (isExact) {
238 APInt IntVal(IntBitWidth, x);
239
240 unsigned IntegerReg =
241 getRegForValue(ConstantInt::get(V->getContext(), IntVal));
242 if (IntegerReg != 0)
243 Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
244 /*Kill=*/false);
245 }
246 }
247 } else if (const auto *Op = dyn_cast<Operator>(V)) {
248 if (!selectOperator(Op, Op->getOpcode()))
249 if (!isa<Instruction>(Op) ||
250 !fastSelectInstruction(cast<Instruction>(Op)))
251 return 0;
252 Reg = lookUpRegForValue(Op);
253 } else if (isa<UndefValue>(V)) {
254 Reg = createResultReg(TLI.getRegClassFor(VT));
255 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
256 TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
257 }
258 return Reg;
259 }
260
261 /// Helper for getRegForValue. This function is called when the value isn't
262 /// already available in a register and must be materialized with new
263 /// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
265 unsigned Reg = 0;
266 // Give the target-specific code a try first.
267 if (isa<Constant>(V))
268 Reg = fastMaterializeConstant(cast<Constant>(V));
269
270 // If target-specific code couldn't or didn't want to handle the value, then
271 // give target-independent code a try.
272 if (!Reg)
273 Reg = materializeConstant(V, VT);
274
275 // Don't cache constant materializations in the general ValueMap.
276 // To do so would require tracking what uses they dominate.
277 if (Reg) {
278 LocalValueMap[V] = Reg;
279 LastLocalValue = MRI.getVRegDef(Reg);
280 }
281 return Reg;
282 }
283
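/// Return the register already assigned to \p V, if any, checking the
/// function-wide ValueMap before the block-local map; returns 0 otherwise.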
unsigned FastISel::lookUpRegForValue(const Value *V) {
285 // Look up the value to see if we already have a register for it. We
286 // cache values defined by Instructions across blocks, and other values
287 // only locally. This is because Instructions already have the SSA
288 // def-dominates-use requirement enforced.
289 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
290 if (I != FuncInfo.ValueMap.end())
291 return I->second;
292 return LocalValueMap[V];
293 }
294
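/// Record that \p I is available in \p Reg (and the NumRegs-1 registers
/// following it). If a different register was already assigned in another
/// block, remember a fixup so the old registers are replaced later.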
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
296 if (!isa<Instruction>(I)) {
297 LocalValueMap[I] = Reg;
298 return;
299 }
300
301 unsigned &AssignedReg = FuncInfo.ValueMap[I];
302 if (AssignedReg == 0)
303 // Use the new register.
304 AssignedReg = Reg;
305 else if (Reg != AssignedReg) {
306 // Arrange for uses of AssignedReg to be replaced by uses of Reg.
307 for (unsigned i = 0; i < NumRegs; i++)
308 FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
309
310 AssignedReg = Reg;
311 }
312 }
313
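/// Materialize a GEP index operand into a register of pointer width,
/// sign-extending or truncating it as needed. Returns the register and
/// whether it may be marked as killed.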
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
315 unsigned IdxN = getRegForValue(Idx);
316 if (IdxN == 0)
317 // Unhandled operand. Halt "fast" selection and bail.
318 return std::pair<unsigned, bool>(0, false);
319
320 bool IdxNIsKill = hasTrivialKill(Idx);
321
322 // If the index is smaller or larger than intptr_t, truncate or extend it.
323 MVT PtrVT = TLI.getPointerTy();
324 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
325 if (IdxVT.bitsLT(PtrVT)) {
326 IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
327 IdxNIsKill);
328 IdxNIsKill = true;
329 } else if (IdxVT.bitsGT(PtrVT)) {
330 IdxN =
331 fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
332 IdxNIsKill = true;
333 }
334 return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
335 }
336
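/// Reset FuncInfo.InsertPt to just past the last local value instruction,
/// skipping any EH_LABELs that must remain at the start of the block.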
void FastISel::recomputeInsertPt() {
338 if (getLastLocalValue()) {
339 FuncInfo.InsertPt = getLastLocalValue();
340 FuncInfo.MBB = FuncInfo.InsertPt->getParent();
341 ++FuncInfo.InsertPt;
342 } else
343 FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
344
345 // Now skip past any EH_LABELs, which must remain at the beginning.
346 while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
347 FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
348 ++FuncInfo.InsertPt;
349 }
350
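/// Erase the machine instructions in the range [I, E), typically code left
/// behind by a failed selection attempt, and recompute the insertion point.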
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
353 assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
354 while (I != E) {
355 MachineInstr *Dead = &*I;
356 ++I;
357 Dead->eraseFromParent();
358 ++NumFastIselDead;
359 }
360 recomputeInsertPt();
361 }
362
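/// Save the current insertion point and debug location and switch to the
/// local value area, where constant materializations are emitted.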
FastISel::SavePoint FastISel::enterLocalValueArea() {
364 MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
365 DebugLoc OldDL = DbgLoc;
366 recomputeInsertPt();
367 DbgLoc = DebugLoc();
368 SavePoint SP = {OldInsertPt, OldDL};
369 return SP;
370 }
371
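/// Record the last local value emitted and restore the insertion point and
/// debug location saved by enterLocalValueArea().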
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
373 if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
374 LastLocalValue = std::prev(FuncInfo.InsertPt);
375
376 // Restore the previous insert position.
377 FuncInfo.InsertPt = OldInsertPt.InsertPt;
378 DbgLoc = OldInsertPt.DL;
379 }
380
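/// Select a binary operator by emitting the target instruction for the given
/// ISD opcode, folding constant integer and floating-point operands into
/// immediate forms where possible.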
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
382 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
383 if (VT == MVT::Other || !VT.isSimple())
384 // Unhandled type. Halt "fast" selection and bail.
385 return false;
386
387 // We only handle legal types. For example, on x86-32 the instruction
388 // selector contains all of the 64-bit instructions from x86-64,
389 // under the assumption that i64 won't be used if the target doesn't
390 // support it.
391 if (!TLI.isTypeLegal(VT)) {
392 // MVT::i1 is special. Allow AND, OR, or XOR because they
393 // don't require additional zeroing, which makes them easy.
394 if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
395 ISDOpcode == ISD::XOR))
396 VT = TLI.getTypeToTransformTo(I->getContext(), VT);
397 else
398 return false;
399 }
400
401 // Check if the first operand is a constant, and handle it as "ri". At -O0,
402 // we don't have anything that canonicalizes operand order.
403 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
404 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
405 unsigned Op1 = getRegForValue(I->getOperand(1));
406 if (!Op1)
407 return false;
408 bool Op1IsKill = hasTrivialKill(I->getOperand(1));
409
410 unsigned ResultReg =
411 fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
412 CI->getZExtValue(), VT.getSimpleVT());
413 if (!ResultReg)
414 return false;
415
416 // We successfully emitted code for the given LLVM Instruction.
417 updateValueMap(I, ResultReg);
418 return true;
419 }
420
421 unsigned Op0 = getRegForValue(I->getOperand(0));
422 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
423 return false;
424 bool Op0IsKill = hasTrivialKill(I->getOperand(0));
425
426 // Check if the second operand is a constant and handle it appropriately.
427 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
428 uint64_t Imm = CI->getSExtValue();
429
430 // Transform "sdiv exact X, 8" -> "sra X, 3".
431 if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
432 cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
433 Imm = Log2_64(Imm);
434 ISDOpcode = ISD::SRA;
435 }
436
437 // Transform "urem x, pow2" -> "and x, pow2-1".
438 if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
439 isPowerOf2_64(Imm)) {
440 --Imm;
441 ISDOpcode = ISD::AND;
442 }
443
444 unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
445 Op0IsKill, Imm, VT.getSimpleVT());
446 if (!ResultReg)
447 return false;
448
449 // We successfully emitted code for the given LLVM Instruction.
450 updateValueMap(I, ResultReg);
451 return true;
452 }
453
454 // Check if the second operand is a constant float.
455 if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
456 unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
457 ISDOpcode, Op0, Op0IsKill, CF);
458 if (ResultReg) {
459 // We successfully emitted code for the given LLVM Instruction.
460 updateValueMap(I, ResultReg);
461 return true;
462 }
463 }
464
465 unsigned Op1 = getRegForValue(I->getOperand(1));
466 if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
467 return false;
468 bool Op1IsKill = hasTrivialKill(I->getOperand(1));
469
470 // Now we have both operands in registers. Emit the instruction.
471 unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
472 ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
473 if (!ResultReg)
474 // Target-specific code wasn't able to find a machine opcode for
475 // the given ISD opcode and type. Halt "fast" selection and bail.
476 return false;
477
478 // We successfully emitted code for the given LLVM Instruction.
479 updateValueMap(I, ResultReg);
480 return true;
481 }
482
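/// Select a GEP by folding constant indices into a running byte offset and
/// emitting adds and multiplies only where necessary. For example, a GEP
/// into [4 x i32] with constant indices 0 and 2 becomes a single add of 8
/// bytes to the base pointer.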
bool FastISel::selectGetElementPtr(const User *I) {
484 unsigned N = getRegForValue(I->getOperand(0));
485 if (!N) // Unhandled operand. Halt "fast" selection and bail.
486 return false;
487 bool NIsKill = hasTrivialKill(I->getOperand(0));
488
489 // Keep a running tab of the total offset to coalesce multiple N = N + Offset
490 // into a single N = N + TotalOffset.
491 uint64_t TotalOffs = 0;
492 // FIXME: What's a good SWAG number for MaxOffs?
493 uint64_t MaxOffs = 2048;
494 Type *Ty = I->getOperand(0)->getType();
495 MVT VT = TLI.getPointerTy();
496 for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
497 E = I->op_end();
498 OI != E; ++OI) {
499 const Value *Idx = *OI;
500 if (auto *StTy = dyn_cast<StructType>(Ty)) {
501 uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
502 if (Field) {
503 // N = N + Offset
504 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
505 if (TotalOffs >= MaxOffs) {
506 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
507 if (!N) // Unhandled operand. Halt "fast" selection and bail.
508 return false;
509 NIsKill = true;
510 TotalOffs = 0;
511 }
512 }
513 Ty = StTy->getElementType(Field);
514 } else {
515 Ty = cast<SequentialType>(Ty)->getElementType();
516
517 // If this is a constant subscript, handle it quickly.
518 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
519 if (CI->isZero())
520 continue;
521 // N = N + Offset
522 uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
523 TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
524 if (TotalOffs >= MaxOffs) {
525 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
526 if (!N) // Unhandled operand. Halt "fast" selection and bail.
527 return false;
528 NIsKill = true;
529 TotalOffs = 0;
530 }
531 continue;
532 }
533 if (TotalOffs) {
534 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
535 if (!N) // Unhandled operand. Halt "fast" selection and bail.
536 return false;
537 NIsKill = true;
538 TotalOffs = 0;
539 }
540
541 // N = N + Idx * ElementSize;
542 uint64_t ElementSize = DL.getTypeAllocSize(Ty);
543 std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
544 unsigned IdxN = Pair.first;
545 bool IdxNIsKill = Pair.second;
546 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
547 return false;
548
549 if (ElementSize != 1) {
550 IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
551 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
552 return false;
553 IdxNIsKill = true;
554 }
555 N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
556 if (!N) // Unhandled operand. Halt "fast" selection and bail.
557 return false;
558 }
559 }
560 if (TotalOffs) {
561 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
562 if (!N) // Unhandled operand. Halt "fast" selection and bail.
563 return false;
564 }
565
566 // We successfully emitted code for the given LLVM Instruction.
567 updateValueMap(I, N);
568 return true;
569 }
570
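/// Append the live-variable operands of a stackmap or patchpoint call to
/// \p Ops: constants are tagged with a ConstantOp marker, static allocas
/// become frame indices, and everything else is passed in a register.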
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
573 for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
574 Value *Val = CI->getArgOperand(i);
575 // Check for constants and encode them with a StackMaps::ConstantOp prefix.
576 if (const auto *C = dyn_cast<ConstantInt>(Val)) {
577 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
578 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
579 } else if (isa<ConstantPointerNull>(Val)) {
580 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
581 Ops.push_back(MachineOperand::CreateImm(0));
582 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target-specific frame index
      // elimination implementation.
586 auto SI = FuncInfo.StaticAllocaMap.find(AI);
587 if (SI != FuncInfo.StaticAllocaMap.end())
588 Ops.push_back(MachineOperand::CreateFI(SI->second));
589 else
590 return false;
591 } else {
592 unsigned Reg = getRegForValue(Val);
593 if (!Reg)
594 return false;
595 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
596 }
597 }
598 return true;
599 }
600
bool FastISel::selectStackmap(const CallInst *I) {
602 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
603 // [live variables...])
604 assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
605 "Stackmap cannot return a value.");
606
607 // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
609 // intrinsic, this won't be lowered to a function call. This means we don't
610 // have to worry about calling conventions and target-specific lowering code.
611 // Instead we perform the call lowering right here.
612 //
613 // CALLSEQ_START(0)
614 // STACKMAP(id, nbytes, ...)
615 // CALLSEQ_END(0, 0)
616 //
617 SmallVector<MachineOperand, 32> Ops;
618
619 // Add the <id> and <numBytes> constants.
620 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
621 "Expected a constant integer.");
622 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
623 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
624
625 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
626 "Expected a constant integer.");
627 const auto *NumBytes =
628 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
629 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
630
631 // Push live variables for the stack map (skipping the first two arguments
632 // <id> and <numBytes>).
633 if (!addStackMapLiveVars(Ops, I, 2))
634 return false;
635
636 // We are not adding any register mask info here, because the stackmap doesn't
637 // clobber anything.
638
639 // Add scratch registers as implicit def and early clobber.
640 CallingConv::ID CC = I->getCallingConv();
641 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
642 for (unsigned i = 0; ScratchRegs[i]; ++i)
643 Ops.push_back(MachineOperand::CreateReg(
644 ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
645 /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
646
647 // Issue CALLSEQ_START
648 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
649 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
650 .addImm(0);
651
652 // Issue STACKMAP.
653 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
654 TII.get(TargetOpcode::STACKMAP));
655 for (auto const &MO : Ops)
656 MIB.addOperand(MO);
657
658 // Issue CALLSEQ_END
659 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
660 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
661 .addImm(0)
662 .addImm(0);
663
664 // Inform the Frame Information that we have a stackmap in this function.
665 FuncInfo.MF->getFrameInfo()->setHasStackMap();
666
667 return true;
668 }
669
670 /// \brief Lower an argument list according to the target calling convention.
671 ///
672 /// This is a helper for lowering intrinsics that follow a target calling
673 /// convention or require stack pointer adjustment. Only a subset of the
674 /// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
678 ArgListTy Args;
679 Args.reserve(NumArgs);
680
681 // Populate the argument list.
682 // Attributes for args start at offset 1, after the return attribute.
683 ImmutableCallSite CS(CI);
684 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
685 ArgI != ArgE; ++ArgI) {
686 Value *V = CI->getOperand(ArgI);
687
688 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
689
690 ArgListEntry Entry;
691 Entry.Val = V;
692 Entry.Ty = V->getType();
693 Entry.setAttributes(&CS, AttrI);
694 Args.push_back(Entry);
695 }
696
697 Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
698 : CI->getType();
699 CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
700
701 return lowerCallTo(CLI);
702 }
703
bool FastISel::selectPatchpoint(const CallInst *I) {
705 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
706 // i32 <numBytes>,
707 // i8* <target>,
708 // i32 <numArgs>,
709 // [Args...],
710 // [live variables...])
711 CallingConv::ID CC = I->getCallingConv();
712 bool IsAnyRegCC = CC == CallingConv::AnyReg;
713 bool HasDef = !I->getType()->isVoidTy();
714 Value *Callee = I->getOperand(PatchPointOpers::TargetPos);
715
716 // Get the real number of arguments participating in the call <numArgs>
717 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
718 "Expected a constant integer.");
719 const auto *NumArgsVal =
720 cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
721 unsigned NumArgs = NumArgsVal->getZExtValue();
722
723 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
724 // This includes all meta-operands up to but not including CC.
725 unsigned NumMetaOpers = PatchPointOpers::CCPos;
726 assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
727 "Not enough arguments provided to the patchpoint intrinsic");
728
729 // For AnyRegCC the arguments are lowered later on manually.
730 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
731 CallLoweringInfo CLI;
732 CLI.setIsPatchPoint();
733 if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
734 return false;
735
736 assert(CLI.Call && "No call instruction specified.");
737
738 SmallVector<MachineOperand, 32> Ops;
739
740 // Add an explicit result reg if we use the anyreg calling convention.
741 if (IsAnyRegCC && HasDef) {
742 assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
743 CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
744 CLI.NumResultRegs = 1;
745 Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
746 }
747
748 // Add the <id> and <numBytes> constants.
749 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
750 "Expected a constant integer.");
751 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
752 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
753
754 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
755 "Expected a constant integer.");
756 const auto *NumBytes =
757 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
758 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
759
760 // Assume that the callee is a constant address or null pointer.
761 // FIXME: handle function symbols in the future.
762 uint64_t CalleeAddr;
763 if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
764 CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
765 else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
766 if (C->getOpcode() == Instruction::IntToPtr)
767 CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
768 else
769 llvm_unreachable("Unsupported ConstantExpr.");
770 } else if (isa<ConstantPointerNull>(Callee))
771 CalleeAddr = 0;
772 else
773 llvm_unreachable("Unsupported callee address.");
774
775 Ops.push_back(MachineOperand::CreateImm(CalleeAddr));
776
777 // Adjust <numArgs> to account for any arguments that have been passed on
778 // the stack instead.
779 unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
780 Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
781
782 // Add the calling convention
783 Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
784
785 // Add the arguments we omitted previously. The register allocator should
786 // place these in any free register.
787 if (IsAnyRegCC) {
788 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
789 unsigned Reg = getRegForValue(I->getArgOperand(i));
790 if (!Reg)
791 return false;
792 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
793 }
794 }
795
796 // Push the arguments from the call instruction.
797 for (auto Reg : CLI.OutRegs)
798 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
799
800 // Push live variables for the stack map.
801 if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
802 return false;
803
804 // Push the register mask info.
805 Ops.push_back(MachineOperand::CreateRegMask(
806 TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
807
808 // Add scratch registers as implicit def and early clobber.
809 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
810 for (unsigned i = 0; ScratchRegs[i]; ++i)
811 Ops.push_back(MachineOperand::CreateReg(
812 ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
813 /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
814
815 // Add implicit defs (return values).
816 for (auto Reg : CLI.InRegs)
817 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
818 /*IsImpl=*/true));
819
820 // Insert the patchpoint instruction before the call generated by the target.
821 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
822 TII.get(TargetOpcode::PATCHPOINT));
823
824 for (auto &MO : Ops)
825 MIB.addOperand(MO);
826
827 MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
828
829 // Delete the original call instruction.
830 CLI.Call->eraseFromParent();
831
832 // Inform the Frame Information that we have a patchpoint in this function.
833 FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
834
835 if (CLI.NumResultRegs)
836 updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
837 return true;
838 }
839
840 /// Returns an AttributeSet representing the attributes applied to the return
841 /// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
843 SmallVector<Attribute::AttrKind, 2> Attrs;
844 if (CLI.RetSExt)
845 Attrs.push_back(Attribute::SExt);
846 if (CLI.RetZExt)
847 Attrs.push_back(Attribute::ZExt);
848 if (CLI.IsInReg)
849 Attrs.push_back(Attribute::InReg);
850
851 return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
852 Attrs);
853 }
854
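/// Lower a call to the external symbol \p SymName, taking the first
/// \p NumArgs operands of \p CI as arguments and reusing the call site's
/// parameter attributes.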
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
857 ImmutableCallSite CS(CI);
858
859 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
860 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
861 Type *RetTy = FTy->getReturnType();
862
863 ArgListTy Args;
864 Args.reserve(NumArgs);
865
866 // Populate the argument list.
867 // Attributes for args start at offset 1, after the return attribute.
868 for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
869 Value *V = CI->getOperand(ArgI);
870
871 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
872
873 ArgListEntry Entry;
874 Entry.Val = V;
875 Entry.Ty = V->getType();
876 Entry.setAttributes(&CS, ArgI + 1);
877 Args.push_back(Entry);
878 }
879
880 CallLoweringInfo CLI;
881 CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);
882
883 return lowerCallTo(CLI);
884 }
885
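/// Lower a populated CallLoweringInfo: compute the expected return registers
/// and outgoing argument flags, then let the target emit the actual call
/// through fastLowerCall.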
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
887 // Handle the incoming return values from the call.
888 CLI.clearIns();
889 SmallVector<EVT, 4> RetTys;
890 ComputeValueVTs(TLI, CLI.RetTy, RetTys);
891
892 SmallVector<ISD::OutputArg, 4> Outs;
893 GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);
894
895 bool CanLowerReturn = TLI.CanLowerReturn(
896 CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
897
898 // FIXME: sret demotion isn't supported yet - bail out.
899 if (!CanLowerReturn)
900 return false;
901
902 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
903 EVT VT = RetTys[I];
904 MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
905 unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
906 for (unsigned i = 0; i != NumRegs; ++i) {
907 ISD::InputArg MyFlags;
908 MyFlags.VT = RegisterVT;
909 MyFlags.ArgVT = VT;
910 MyFlags.Used = CLI.IsReturnValueUsed;
911 if (CLI.RetSExt)
912 MyFlags.Flags.setSExt();
913 if (CLI.RetZExt)
914 MyFlags.Flags.setZExt();
915 if (CLI.IsInReg)
916 MyFlags.Flags.setInReg();
917 CLI.Ins.push_back(MyFlags);
918 }
919 }
920
921 // Handle all of the outgoing arguments.
922 CLI.clearOuts();
923 for (auto &Arg : CLI.getArgs()) {
924 Type *FinalType = Arg.Ty;
925 if (Arg.IsByVal)
926 FinalType = cast<PointerType>(Arg.Ty)->getElementType();
927 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
928 FinalType, CLI.CallConv, CLI.IsVarArg);
929
930 ISD::ArgFlagsTy Flags;
931 if (Arg.IsZExt)
932 Flags.setZExt();
933 if (Arg.IsSExt)
934 Flags.setSExt();
935 if (Arg.IsInReg)
936 Flags.setInReg();
937 if (Arg.IsSRet)
938 Flags.setSRet();
939 if (Arg.IsByVal)
940 Flags.setByVal();
941 if (Arg.IsInAlloca) {
942 Flags.setInAlloca();
943 // Set the byval flag for CCAssignFn callbacks that don't know about
944 // inalloca. This way we can know how many bytes we should've allocated
945 // and how many bytes a callee cleanup function will pop. If we port
946 // inalloca to more targets, we'll have to add custom inalloca handling in
947 // the various CC lowering callbacks.
948 Flags.setByVal();
949 }
950 if (Arg.IsByVal || Arg.IsInAlloca) {
951 PointerType *Ty = cast<PointerType>(Arg.Ty);
952 Type *ElementTy = Ty->getElementType();
953 unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
954 // For ByVal, alignment should come from FE. BE will guess if this info is
955 // not there, but there are cases it cannot get right.
956 unsigned FrameAlign = Arg.Alignment;
957 if (!FrameAlign)
958 FrameAlign = TLI.getByValTypeAlignment(ElementTy);
959 Flags.setByValSize(FrameSize);
960 Flags.setByValAlign(FrameAlign);
961 }
962 if (Arg.IsNest)
963 Flags.setNest();
964 if (NeedsRegBlock)
965 Flags.setInConsecutiveRegs();
966 unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
967 Flags.setOrigAlign(OriginalAlignment);
968
969 CLI.OutVals.push_back(Arg.Val);
970 CLI.OutFlags.push_back(Flags);
971 }
972
973 if (!fastLowerCall(CLI))
974 return false;
975
976 // Set all unused physreg defs as dead.
977 assert(CLI.Call && "No call instruction specified.");
978 CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
979
980 if (CLI.NumResultRegs && CLI.CS)
981 updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
982
983 return true;
984 }
985
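/// Build a CallLoweringInfo for an ordinary call instruction, including the
/// target-independent tail call position check, and hand it to lowerCallTo.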
bool FastISel::lowerCall(const CallInst *CI) {
987 ImmutableCallSite CS(CI);
988
989 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
990 FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
991 Type *RetTy = FuncTy->getReturnType();
992
993 ArgListTy Args;
994 ArgListEntry Entry;
995 Args.reserve(CS.arg_size());
996
997 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
998 i != e; ++i) {
999 Value *V = *i;
1000
1001 // Skip empty types
1002 if (V->getType()->isEmptyTy())
1003 continue;
1004
1005 Entry.Val = V;
1006 Entry.Ty = V->getType();
1007
1008 // Skip the first return-type Attribute to get to params.
1009 Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
1010 Args.push_back(Entry);
1011 }
1012
1013 // Check if target-independent constraints permit a tail call here.
1014 // Target-dependent constraints are checked within fastLowerCall.
1015 bool IsTailCall = CI->isTailCall();
1016 if (IsTailCall && !isInTailCallPosition(CS, TM))
1017 IsTailCall = false;
1018
1019 CallLoweringInfo CLI;
1020 CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1021 .setTailCall(IsTailCall);
1022
1023 return lowerCallTo(CLI);
1024 }
1025
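/// Select a call: simple inline asm is emitted directly, intrinsics are
/// dispatched to selectIntrinsicCall, and ordinary calls are lowered via
/// lowerCall after flushing the local value map.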
bool FastISel::selectCall(const User *I) {
1027 const CallInst *Call = cast<CallInst>(I);
1028
1029 // Handle simple inline asms.
1030 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1031 // If the inline asm has side effects, then make sure that no local value
1032 // lives across by flushing the local value map.
1033 if (IA->hasSideEffects())
1034 flushLocalValueMap();
1035
1036 // Don't attempt to handle constraints.
1037 if (!IA->getConstraintString().empty())
1038 return false;
1039
1040 unsigned ExtraInfo = 0;
1041 if (IA->hasSideEffects())
1042 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1043 if (IA->isAlignStack())
1044 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1045
1046 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1047 TII.get(TargetOpcode::INLINEASM))
1048 .addExternalSymbol(IA->getAsmString().c_str())
1049 .addImm(ExtraInfo);
1050 return true;
1051 }
1052
1053 MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1054 ComputeUsesVAFloatArgument(*Call, &MMI);
1055
1056 // Handle intrinsic function calls.
1057 if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1058 return selectIntrinsicCall(II);
1059
  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call and then use the value, because the value tends to be
  // spilled on the stack. So, we move the pointer to the last local value to
  // the beginning of the block, so that all of the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics since they tend to be inlined.
1067 flushLocalValueMap();
1068
1069 return lowerCall(Call);
1070 }
1071
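/// Handle intrinsics that fast-isel understands directly (lifetime markers,
/// debug info, objectsize, expect, stackmap and patchpoint); everything else
/// is passed to the target hook fastLowerIntrinsicCall.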
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1073 switch (II->getIntrinsicID()) {
1074 default:
1075 break;
1076 // At -O0 we don't care about the lifetime intrinsics.
1077 case Intrinsic::lifetime_start:
1078 case Intrinsic::lifetime_end:
1079 // The donothing intrinsic does, well, nothing.
1080 case Intrinsic::donothing:
1081 return true;
1082 case Intrinsic::eh_actions: {
1083 unsigned ResultReg = getRegForValue(UndefValue::get(II->getType()));
1084 if (!ResultReg)
1085 return false;
1086 updateValueMap(II, ResultReg);
1087 return true;
1088 }
1089 case Intrinsic::dbg_declare: {
1090 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1091 DIVariable DIVar = DI->getVariable();
1092 if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
1093 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1094 return true;
1095 }
1096
1097 const Value *Address = DI->getAddress();
1098 if (!Address || isa<UndefValue>(Address)) {
1099 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1100 return true;
1101 }
1102
1103 unsigned Offset = 0;
1104 Optional<MachineOperand> Op;
1105 if (const auto *Arg = dyn_cast<Argument>(Address))
1106 // Some arguments' frame index is recorded during argument lowering.
1107 Offset = FuncInfo.getArgumentFrameIndex(Arg);
1108 if (Offset)
1109 Op = MachineOperand::CreateFI(Offset);
1110 if (!Op)
1111 if (unsigned Reg = lookUpRegForValue(Address))
1112 Op = MachineOperand::CreateReg(Reg, false);
1113
1114 // If we have a VLA that has a "use" in a metadata node that's then used
1115 // here but it has no other uses, then we have a problem. E.g.,
1116 //
1117 // int foo (const int *x) {
1118 // char a[*x];
1119 // return 0;
1120 // }
1121 //
1122 // If we assign 'a' a vreg and fast isel later on has to use the selection
1123 // DAG isel, it will want to copy the value to the vreg. However, there are
1124 // no uses, which goes counter to what selection DAG isel expects.
1125 if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1126 (!isa<AllocaInst>(Address) ||
1127 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1128 Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1129 false);
1130
1131 if (Op) {
1132 assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1133 "Expected inlined-at fields to agree");
1134 if (Op->isReg()) {
1135 Op->setIsDebug(true);
1136 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1137 TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
1138 DI->getVariable(), DI->getExpression());
1139 } else
1140 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1141 TII.get(TargetOpcode::DBG_VALUE))
1142 .addOperand(*Op)
1143 .addImm(0)
1144 .addMetadata(DI->getVariable())
1145 .addMetadata(DI->getExpression());
1146 } else {
1147 // We can't yet handle anything else here because it would require
1148 // generating code, thus altering codegen because of debug info.
1149 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1150 }
1151 return true;
1152 }
1153 case Intrinsic::dbg_value: {
1154 // This form of DBG_VALUE is target-independent.
1155 const DbgValueInst *DI = cast<DbgValueInst>(II);
1156 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1157 const Value *V = DI->getValue();
1158 assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1159 "Expected inlined-at fields to agree");
1160 if (!V) {
1161 // Currently the optimizer can produce this; insert an undef to
1162 // help debugging. Probably the optimizer should not do this.
1163 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1164 .addReg(0U)
1165 .addImm(DI->getOffset())
1166 .addMetadata(DI->getVariable())
1167 .addMetadata(DI->getExpression());
1168 } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1169 if (CI->getBitWidth() > 64)
1170 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1171 .addCImm(CI)
1172 .addImm(DI->getOffset())
1173 .addMetadata(DI->getVariable())
1174 .addMetadata(DI->getExpression());
1175 else
1176 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1177 .addImm(CI->getZExtValue())
1178 .addImm(DI->getOffset())
1179 .addMetadata(DI->getVariable())
1180 .addMetadata(DI->getExpression());
1181 } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1182 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1183 .addFPImm(CF)
1184 .addImm(DI->getOffset())
1185 .addMetadata(DI->getVariable())
1186 .addMetadata(DI->getExpression());
1187 } else if (unsigned Reg = lookUpRegForValue(V)) {
1188 // FIXME: This does not handle register-indirect values at offset 0.
1189 bool IsIndirect = DI->getOffset() != 0;
1190 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1191 DI->getOffset(), DI->getVariable(), DI->getExpression());
1192 } else {
1193 // We can't yet handle anything else here because it would require
1194 // generating code, thus altering codegen because of debug info.
1195 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1196 }
1197 return true;
1198 }
1199 case Intrinsic::objectsize: {
1200 ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1201 unsigned long long Res = CI->isZero() ? -1ULL : 0;
1202 Constant *ResCI = ConstantInt::get(II->getType(), Res);
1203 unsigned ResultReg = getRegForValue(ResCI);
1204 if (!ResultReg)
1205 return false;
1206 updateValueMap(II, ResultReg);
1207 return true;
1208 }
1209 case Intrinsic::expect: {
1210 unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1211 if (!ResultReg)
1212 return false;
1213 updateValueMap(II, ResultReg);
1214 return true;
1215 }
1216 case Intrinsic::experimental_stackmap:
1217 return selectStackmap(II);
1218 case Intrinsic::experimental_patchpoint_void:
1219 case Intrinsic::experimental_patchpoint_i64:
1220 return selectPatchpoint(II);
1221 }
1222
1223 return fastLowerIntrinsicCall(II);
1224 }
1225
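/// Select a cast by emitting a single target instruction for the given ISD
/// cast opcode, provided both the source and destination types are legal.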
bool FastISel::selectCast(const User *I, unsigned Opcode) {
1227 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
1228 EVT DstVT = TLI.getValueType(I->getType());
1229
1230 if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1231 !DstVT.isSimple())
1232 // Unhandled type. Halt "fast" selection and bail.
1233 return false;
1234
1235 // Check if the destination type is legal.
1236 if (!TLI.isTypeLegal(DstVT))
1237 return false;
1238
1239 // Check if the source operand is legal.
1240 if (!TLI.isTypeLegal(SrcVT))
1241 return false;
1242
1243 unsigned InputReg = getRegForValue(I->getOperand(0));
1244 if (!InputReg)
1245 // Unhandled operand. Halt "fast" selection and bail.
1246 return false;
1247
1248 bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1249
1250 unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1251 Opcode, InputReg, InputRegIsKill);
1252 if (!ResultReg)
1253 return false;
1254
1255 updateValueMap(I, ResultReg);
1256 return true;
1257 }
1258
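/// Select a bitcast: reuse the operand's register when the type is
/// unchanged, otherwise try a register-to-register copy and fall back to an
/// ISD::BITCAST node.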
bool FastISel::selectBitCast(const User *I) {
1260 // If the bitcast doesn't change the type, just use the operand value.
1261 if (I->getType() == I->getOperand(0)->getType()) {
1262 unsigned Reg = getRegForValue(I->getOperand(0));
1263 if (!Reg)
1264 return false;
1265 updateValueMap(I, Reg);
1266 return true;
1267 }
1268
1269 // Bitcasts of other values become reg-reg copies or BITCAST operators.
1270 EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
1271 EVT DstEVT = TLI.getValueType(I->getType());
1272 if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1273 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1274 // Unhandled type. Halt "fast" selection and bail.
1275 return false;
1276
1277 MVT SrcVT = SrcEVT.getSimpleVT();
1278 MVT DstVT = DstEVT.getSimpleVT();
1279 unsigned Op0 = getRegForValue(I->getOperand(0));
1280 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1281 return false;
1282 bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1283
1284 // First, try to perform the bitcast by inserting a reg-reg copy.
1285 unsigned ResultReg = 0;
1286 if (SrcVT == DstVT) {
1287 const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1288 const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1289 // Don't attempt a cross-class copy. It will likely fail.
1290 if (SrcClass == DstClass) {
1291 ResultReg = createResultReg(DstClass);
1292 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1293 TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1294 }
1295 }
1296
1297 // If the reg-reg copy failed, select a BITCAST opcode.
1298 if (!ResultReg)
1299 ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1300
1301 if (!ResultReg)
1302 return false;
1303
1304 updateValueMap(I, ResultReg);
1305 return true;
1306 }
1307
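/// Select a single IR instruction: try the target-independent selector first
/// (unless disabled), then the target-specific one, removing any dead
/// machine instructions left behind by a failed attempt.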
bool FastISel::selectInstruction(const Instruction *I) {
1309 // Just before the terminator instruction, insert instructions to
1310 // feed PHI nodes in successor blocks.
1311 if (isa<TerminatorInst>(I))
1312 if (!handlePHINodesInSuccessorBlocks(I->getParent()))
1313 return false;
1314
1315 DbgLoc = I->getDebugLoc();
1316
1317 SavedInsertPt = FuncInfo.InsertPt;
1318
1319 if (const auto *Call = dyn_cast<CallInst>(I)) {
1320 const Function *F = Call->getCalledFunction();
1321 LibFunc::Func Func;
1322
1323 // As a special case, don't handle calls to builtin library functions that
1324 // may be translated directly to target instructions.
1325 if (F && !F->hasLocalLinkage() && F->hasName() &&
1326 LibInfo->getLibFunc(F->getName(), Func) &&
1327 LibInfo->hasOptimizedCodeGen(Func))
1328 return false;
1329
    // Don't handle Intrinsic::trap if a trap function is specified.
1331 if (F && F->getIntrinsicID() == Intrinsic::trap &&
1332 !TM.Options.getTrapFunctionName().empty())
1333 return false;
1334 }
1335
1336 // First, try doing target-independent selection.
1337 if (!SkipTargetIndependentISel) {
1338 if (selectOperator(I, I->getOpcode())) {
1339 ++NumFastIselSuccessIndependent;
1340 DbgLoc = DebugLoc();
1341 return true;
1342 }
1343 // Remove dead code.
1344 recomputeInsertPt();
1345 if (SavedInsertPt != FuncInfo.InsertPt)
1346 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1347 SavedInsertPt = FuncInfo.InsertPt;
1348 }
1349 // Next, try calling the target to attempt to handle the instruction.
1350 if (fastSelectInstruction(I)) {
1351 ++NumFastIselSuccessTarget;
1352 DbgLoc = DebugLoc();
1353 return true;
1354 }
1355 // Remove dead code.
1356 recomputeInsertPt();
1357 if (SavedInsertPt != FuncInfo.InsertPt)
1358 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1359
1360 DbgLoc = DebugLoc();
1361 // Undo phi node updates, because they will be added again by SelectionDAG.
1362 if (isa<TerminatorInst>(I))
1363 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1364 return false;
1365 }
1366
1367 /// Emit an unconditional branch to the given block, unless it is the immediate
1368 /// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
1370 if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1371 FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information, if this is the only instruction in
    // the block then emit it; otherwise we have the unconditional
    // fall-through case, which needs no instructions.
1375 } else {
1376 // The unconditional branch case.
1377 TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
1378 SmallVector<MachineOperand, 0>(), DbgLoc);
1379 }
1380 uint32_t BranchWeight = 0;
1381 if (FuncInfo.BPI)
1382 BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
1383 MSucc->getBasicBlock());
1384 FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
1385 }
1386
1387 /// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
1389 unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1390 if (!OpReg)
1391 return false;
1392 bool OpRegIsKill = hasTrivialKill(I);
1393
1394 // If the target has ISD::FNEG, use it.
1395 EVT VT = TLI.getValueType(I->getType());
1396 unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1397 OpReg, OpRegIsKill);
1398 if (ResultReg) {
1399 updateValueMap(I, ResultReg);
1400 return true;
1401 }
1402
1403 // Bitcast the value to integer, twiddle the sign bit with xor,
1404 // and then bitcast it back to floating-point.
1405 if (VT.getSizeInBits() > 64)
1406 return false;
1407 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1408 if (!TLI.isTypeLegal(IntVT))
1409 return false;
1410
1411 unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1412 ISD::BITCAST, OpReg, OpRegIsKill);
1413 if (!IntReg)
1414 return false;
1415
1416 unsigned IntResultReg = fastEmit_ri_(
1417 IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1418 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1419 if (!IntResultReg)
1420 return false;
1421
1422 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1423 IntResultReg, /*IsKill=*/true);
1424 if (!ResultReg)
1425 return false;
1426
1427 updateValueMap(I, ResultReg);
1428 return true;
1429 }
1430
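/// Select an extractvalue by reusing the registers of the aggregate operand;
/// the result register is just an offset from the aggregate's first
/// register, so no machine instructions are emitted.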
bool FastISel::selectExtractValue(const User *U) {
1432 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1433 if (!EVI)
1434 return false;
1435
1436 // Make sure we only try to handle extracts with a legal result. But also
1437 // allow i1 because it's easy.
1438 EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
1439 if (!RealVT.isSimple())
1440 return false;
1441 MVT VT = RealVT.getSimpleVT();
1442 if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1443 return false;
1444
1445 const Value *Op0 = EVI->getOperand(0);
1446 Type *AggTy = Op0->getType();
1447
1448 // Get the base result register.
1449 unsigned ResultReg;
1450 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
1451 if (I != FuncInfo.ValueMap.end())
1452 ResultReg = I->second;
1453 else if (isa<Instruction>(Op0))
1454 ResultReg = FuncInfo.InitializeRegForValue(Op0);
1455 else
1456 return false; // fast-isel can't handle aggregate constants at the moment
1457
1458 // Get the actual result register, which is an offset from the base register.
1459 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1460
1461 SmallVector<EVT, 4> AggValueVTs;
1462 ComputeValueVTs(TLI, AggTy, AggValueVTs);
1463
1464 for (unsigned i = 0; i < VTIndex; i++)
1465 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1466
1467 updateValueMap(EVI, ResultReg);
1468 return true;
1469 }
1470
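/// Dispatch on the IR opcode and select the operator with the
/// target-independent select* helpers; returns false for anything that must
/// be left to SelectionDAG.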
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1472 switch (Opcode) {
1473 case Instruction::Add:
1474 return selectBinaryOp(I, ISD::ADD);
1475 case Instruction::FAdd:
1476 return selectBinaryOp(I, ISD::FADD);
1477 case Instruction::Sub:
1478 return selectBinaryOp(I, ISD::SUB);
1479 case Instruction::FSub:
1480 // FNeg is currently represented in LLVM IR as a special case of FSub.
1481 if (BinaryOperator::isFNeg(I))
1482 return selectFNeg(I);
1483 return selectBinaryOp(I, ISD::FSUB);
1484 case Instruction::Mul:
1485 return selectBinaryOp(I, ISD::MUL);
1486 case Instruction::FMul:
1487 return selectBinaryOp(I, ISD::FMUL);
1488 case Instruction::SDiv:
1489 return selectBinaryOp(I, ISD::SDIV);
1490 case Instruction::UDiv:
1491 return selectBinaryOp(I, ISD::UDIV);
1492 case Instruction::FDiv:
1493 return selectBinaryOp(I, ISD::FDIV);
1494 case Instruction::SRem:
1495 return selectBinaryOp(I, ISD::SREM);
1496 case Instruction::URem:
1497 return selectBinaryOp(I, ISD::UREM);
1498 case Instruction::FRem:
1499 return selectBinaryOp(I, ISD::FREM);
1500 case Instruction::Shl:
1501 return selectBinaryOp(I, ISD::SHL);
1502 case Instruction::LShr:
1503 return selectBinaryOp(I, ISD::SRL);
1504 case Instruction::AShr:
1505 return selectBinaryOp(I, ISD::SRA);
1506 case Instruction::And:
1507 return selectBinaryOp(I, ISD::AND);
1508 case Instruction::Or:
1509 return selectBinaryOp(I, ISD::OR);
1510 case Instruction::Xor:
1511 return selectBinaryOp(I, ISD::XOR);
1512
1513 case Instruction::GetElementPtr:
1514 return selectGetElementPtr(I);
1515
1516 case Instruction::Br: {
1517 const BranchInst *BI = cast<BranchInst>(I);
1518
1519 if (BI->isUnconditional()) {
1520 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1521 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1522 fastEmitBranch(MSucc, BI->getDebugLoc());
1523 return true;
1524 }
1525
1526 // Conditional branches are not handled yet.
1527 // Halt "fast" selection and bail.
1528 return false;
1529 }
1530
1531 case Instruction::Unreachable:
1532 if (TM.Options.TrapUnreachable)
1533 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1534 else
1535 return true;
1536
1537 case Instruction::Alloca:
1538 // FunctionLowering has the static-sized case covered.
1539 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1540 return true;
1541
1542 // Dynamic-sized alloca is not handled yet.
1543 return false;
1544
1545 case Instruction::Call:
1546 return selectCall(I);
1547
1548 case Instruction::BitCast:
1549 return selectBitCast(I);
1550
1551 case Instruction::FPToSI:
1552 return selectCast(I, ISD::FP_TO_SINT);
1553 case Instruction::ZExt:
1554 return selectCast(I, ISD::ZERO_EXTEND);
1555 case Instruction::SExt:
1556 return selectCast(I, ISD::SIGN_EXTEND);
1557 case Instruction::Trunc:
1558 return selectCast(I, ISD::TRUNCATE);
1559 case Instruction::SIToFP:
1560 return selectCast(I, ISD::SINT_TO_FP);
1561
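// Pointer/integer casts of the same width are register no-ops; otherwise
// they lower to a zero-extend or a truncate of the source value.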
1562 case Instruction::IntToPtr: // Deliberate fall-through.
1563 case Instruction::PtrToInt: {
1564 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
1565 EVT DstVT = TLI.getValueType(I->getType());
1566 if (DstVT.bitsGT(SrcVT))
1567 return selectCast(I, ISD::ZERO_EXTEND);
1568 if (DstVT.bitsLT(SrcVT))
1569 return selectCast(I, ISD::TRUNCATE);
1570 unsigned Reg = getRegForValue(I->getOperand(0));
1571 if (!Reg)
1572 return false;
1573 updateValueMap(I, Reg);
1574 return true;
1575 }
1576
1577 case Instruction::ExtractValue:
1578 return selectExtractValue(I);
1579
1580 case Instruction::PHI:
1581 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1582
1583 default:
1584 // Unhandled instruction. Halt "fast" selection and bail.
1585 return false;
1586 }
1587 }
1588
1589 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1590 const TargetLibraryInfo *LibInfo,
1591 bool SkipTargetIndependentISel)
1592 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1593 MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1594 TM(FuncInfo.MF->getTarget()), DL(*TM.getDataLayout()),
1595 TII(*MF->getSubtarget().getInstrInfo()),
1596 TLI(*MF->getSubtarget().getTargetLowering()),
1597 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1598 SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1599
1600 FastISel::~FastISel() {}
1601
1602 bool FastISel::fastLowerArguments() { return false; }
1603
1604 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1605
1606 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1607 return false;
1608 }
1609
1610 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1611
1612 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1613 bool /*Op0IsKill*/) {
1614 return 0;
1615 }
1616
1617 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1618 bool /*Op0IsKill*/, unsigned /*Op1*/,
1619 bool /*Op1IsKill*/) {
1620 return 0;
1621 }
1622
1623 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1624 return 0;
1625 }
1626
1627 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1628 const ConstantFP * /*FPImm*/) {
1629 return 0;
1630 }
1631
1632 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1633 bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1634 return 0;
1635 }
1636
1637 unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
1638 bool /*Op0IsKill*/,
1639 const ConstantFP * /*FPImm*/) {
1640 return 0;
1641 }
1642
1643 unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
1644 bool /*Op0IsKill*/, unsigned /*Op1*/,
1645 bool /*Op1IsKill*/, uint64_t /*Imm*/) {
1646 return 0;
1647 }
1648
1649 /// This method is a wrapper around fastEmit_ri. It first tries to emit an
1650 /// instruction with an immediate operand using fastEmit_ri.
1651 /// If that fails, it materializes the immediate into a register and tries
1652 /// fastEmit_rr instead.
1653 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1654 bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1655 // If this is a multiply by a power of two, emit this as a shift left.
1656 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1657 Opcode = ISD::SHL;
1658 Imm = Log2_64(Imm);
1659 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1660 // div x, 8 -> srl x, 3
1661 Opcode = ISD::SRL;
1662 Imm = Log2_64(Imm);
1663 }
1664
1665 // Horrible hack (to be removed): check to make sure shift amounts are
1666 // in-range.
1667 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1668 Imm >= VT.getSizeInBits())
1669 return 0;
1670
1671 // First check if immediate type is legal. If not, we can't use the ri form.
1672 unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1673 if (ResultReg)
1674 return ResultReg;
1675 unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1676 if (!MaterialReg) {
1677 // This is a bit ugly/slow, but failing here means falling out of
1678 // fast-isel, which would be very slow.
1679 IntegerType *ITy =
1680 IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1681 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1682 if (!MaterialReg)
1683 return 0;
1684 }
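// Fall back to the register-register form using the materialized constant.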
1685 return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
1686 /*IsKill=*/true);
1687 }
1688
1689 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1690 return MRI.createVirtualRegister(RC);
1691 }
1692
1693 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1694 unsigned OpNum) {
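// Try to constrain the existing virtual register to the class required by
// operand OpNum of II; if that fails, copy into a fresh register of that class.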
1695 if (TargetRegisterInfo::isVirtualRegister(Op)) {
1696 const TargetRegisterClass *RegClass =
1697 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1698 if (!MRI.constrainRegClass(Op, RegClass)) {
1699 // If it's not legal to COPY between the register classes, something
1700 // has gone very wrong before we got here.
1701 unsigned NewOp = createResultReg(RegClass);
1702 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1703 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1704 return NewOp;
1705 }
1706 }
1707 return Op;
1708 }
1709
1710 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1711 const TargetRegisterClass *RC) {
1712 unsigned ResultReg = createResultReg(RC);
1713 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1714
1715 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1716 return ResultReg;
1717 }
1718
1719 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1720 const TargetRegisterClass *RC, unsigned Op0,
1721 bool Op0IsKill) {
1722 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1723
1724 unsigned ResultReg = createResultReg(RC);
1725 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1726
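// If the instruction has an explicit def, emit the result directly into
// ResultReg; otherwise the result appears in the first implicit def, so copy
// it into ResultReg afterwards. The other fastEmitInst_* helpers below follow
// the same pattern.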
1727 if (II.getNumDefs() >= 1)
1728 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1729 .addReg(Op0, getKillRegState(Op0IsKill));
1730 else {
1731 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1732 .addReg(Op0, getKillRegState(Op0IsKill));
1733 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1734 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1735 }
1736
1737 return ResultReg;
1738 }
1739
1740 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1741 const TargetRegisterClass *RC, unsigned Op0,
1742 bool Op0IsKill, unsigned Op1,
1743 bool Op1IsKill) {
1744 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1745
1746 unsigned ResultReg = createResultReg(RC);
1747 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1748 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1749
1750 if (II.getNumDefs() >= 1)
1751 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1752 .addReg(Op0, getKillRegState(Op0IsKill))
1753 .addReg(Op1, getKillRegState(Op1IsKill));
1754 else {
1755 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1756 .addReg(Op0, getKillRegState(Op0IsKill))
1757 .addReg(Op1, getKillRegState(Op1IsKill));
1758 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1759 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1760 }
1761 return ResultReg;
1762 }
1763
1764 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1765 const TargetRegisterClass *RC, unsigned Op0,
1766 bool Op0IsKill, unsigned Op1,
1767 bool Op1IsKill, unsigned Op2,
1768 bool Op2IsKill) {
1769 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1770
1771 unsigned ResultReg = createResultReg(RC);
1772 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1773 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1774 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1775
1776 if (II.getNumDefs() >= 1)
1777 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1778 .addReg(Op0, getKillRegState(Op0IsKill))
1779 .addReg(Op1, getKillRegState(Op1IsKill))
1780 .addReg(Op2, getKillRegState(Op2IsKill));
1781 else {
1782 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1783 .addReg(Op0, getKillRegState(Op0IsKill))
1784 .addReg(Op1, getKillRegState(Op1IsKill))
1785 .addReg(Op2, getKillRegState(Op2IsKill));
1786 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1787 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1788 }
1789 return ResultReg;
1790 }
1791
1792 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1793 const TargetRegisterClass *RC, unsigned Op0,
1794 bool Op0IsKill, uint64_t Imm) {
1795 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1796
1797 unsigned ResultReg = createResultReg(RC);
1798 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1799
1800 if (II.getNumDefs() >= 1)
1801 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1802 .addReg(Op0, getKillRegState(Op0IsKill))
1803 .addImm(Imm);
1804 else {
1805 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1806 .addReg(Op0, getKillRegState(Op0IsKill))
1807 .addImm(Imm);
1808 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1809 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1810 }
1811 return ResultReg;
1812 }
1813
1814 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1815 const TargetRegisterClass *RC, unsigned Op0,
1816 bool Op0IsKill, uint64_t Imm1,
1817 uint64_t Imm2) {
1818 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1819
1820 unsigned ResultReg = createResultReg(RC);
1821 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1822
1823 if (II.getNumDefs() >= 1)
1824 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1825 .addReg(Op0, getKillRegState(Op0IsKill))
1826 .addImm(Imm1)
1827 .addImm(Imm2);
1828 else {
1829 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1830 .addReg(Op0, getKillRegState(Op0IsKill))
1831 .addImm(Imm1)
1832 .addImm(Imm2);
1833 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1834 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1835 }
1836 return ResultReg;
1837 }
1838
1839 unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
1840 const TargetRegisterClass *RC, unsigned Op0,
1841 bool Op0IsKill, const ConstantFP *FPImm) {
1842 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1843
1844 unsigned ResultReg = createResultReg(RC);
1845 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1846
1847 if (II.getNumDefs() >= 1)
1848 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1849 .addReg(Op0, getKillRegState(Op0IsKill))
1850 .addFPImm(FPImm);
1851 else {
1852 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1853 .addReg(Op0, getKillRegState(Op0IsKill))
1854 .addFPImm(FPImm);
1855 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1856 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1857 }
1858 return ResultReg;
1859 }
1860
1861 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1862 const TargetRegisterClass *RC, unsigned Op0,
1863 bool Op0IsKill, unsigned Op1,
1864 bool Op1IsKill, uint64_t Imm) {
1865 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1866
1867 unsigned ResultReg = createResultReg(RC);
1868 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1869 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1870
1871 if (II.getNumDefs() >= 1)
1872 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1873 .addReg(Op0, getKillRegState(Op0IsKill))
1874 .addReg(Op1, getKillRegState(Op1IsKill))
1875 .addImm(Imm);
1876 else {
1877 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1878 .addReg(Op0, getKillRegState(Op0IsKill))
1879 .addReg(Op1, getKillRegState(Op1IsKill))
1880 .addImm(Imm);
1881 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1882 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1883 }
1884 return ResultReg;
1885 }
1886
1887 unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
1888 const TargetRegisterClass *RC,
1889 unsigned Op0, bool Op0IsKill, unsigned Op1,
1890 bool Op1IsKill, uint64_t Imm1,
1891 uint64_t Imm2) {
1892 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1893
1894 unsigned ResultReg = createResultReg(RC);
1895 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1896 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1897
1898 if (II.getNumDefs() >= 1)
1899 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1900 .addReg(Op0, getKillRegState(Op0IsKill))
1901 .addReg(Op1, getKillRegState(Op1IsKill))
1902 .addImm(Imm1)
1903 .addImm(Imm2);
1904 else {
1905 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1906 .addReg(Op0, getKillRegState(Op0IsKill))
1907 .addReg(Op1, getKillRegState(Op1IsKill))
1908 .addImm(Imm1)
1909 .addImm(Imm2);
1910 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1911 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1912 }
1913 return ResultReg;
1914 }
1915
1916 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1917 const TargetRegisterClass *RC, uint64_t Imm) {
1918 unsigned ResultReg = createResultReg(RC);
1919 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1920
1921 if (II.getNumDefs() >= 1)
1922 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1923 .addImm(Imm);
1924 else {
1925 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
1926 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1927 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1928 }
1929 return ResultReg;
1930 }
1931
1932 unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
1933 const TargetRegisterClass *RC, uint64_t Imm1,
1934 uint64_t Imm2) {
1935 unsigned ResultReg = createResultReg(RC);
1936 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1937
1938 if (II.getNumDefs() >= 1)
1939 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1940 .addImm(Imm1)
1941 .addImm(Imm2);
1942 else {
1943 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1)
1944 .addImm(Imm2);
1945 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1946 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1947 }
1948 return ResultReg;
1949 }
1950
1951 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
1952 bool Op0IsKill, uint32_t Idx) {
1953 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1954 assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
1955 "Cannot yet extract from physregs");
1956 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
1957 MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
1958 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
1959 ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
1960 return ResultReg;
1961 }
1962
1963 /// Emit MachineInstrs to compute the value of Op with all but the least
1964 /// significant bit set to zero.
1965 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
1966 return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
1967 }
1968
1969 /// handlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
1970 /// Emit code to ensure constants are copied into registers when needed.
1971 /// Remember the virtual registers that need to be added to the Machine PHI
1972 /// nodes as input. We cannot just directly add them, because expansion
1973 /// might result in multiple MBB's for one BB. As such, the start of the
1974 /// BB might correspond to a different MBB than the end.
1975 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
1976 const TerminatorInst *TI = LLVMBB->getTerminator();
1977
1978 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
1979 FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
1980
1981 // Check successor nodes' PHI nodes that expect a constant to be available
1982 // from this block.
1983 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
1984 const BasicBlock *SuccBB = TI->getSuccessor(succ);
1985 if (!isa<PHINode>(SuccBB->begin()))
1986 continue;
1987 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
1988
1989 // If this terminator has multiple identical successors (common for
1990 // switches), only handle each succ once.
1991 if (!SuccsHandled.insert(SuccMBB).second)
1992 continue;
1993
1994 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
1995
1996 // At this point we know that there is a 1-1 correspondence between LLVM PHI
1997 // nodes and Machine PHI nodes, but the incoming operands have not been
1998 // emitted yet.
1999 for (BasicBlock::const_iterator I = SuccBB->begin();
2000 const auto *PN = dyn_cast<PHINode>(I); ++I) {
2001
2002 // Ignore dead PHIs.
2003 if (PN->use_empty())
2004 continue;
2005
2006 // Only handle legal types. Two interesting things to note here. First,
2007 // by bailing out early, we may leave behind some dead instructions,
2008 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2009 // own moves. Second, this check is necessary because FastISel doesn't
2010 // use CreateRegs to create registers, so it always creates
2011 // exactly one register for each non-void instruction.
2012 EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
2013 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2014 // Handle integer promotions, though, because they're common and easy.
2015 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2016 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2017 return false;
2018 }
2019 }
2020
2021 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2022
2023 // Set the DebugLoc for the copy. Prefer the location of the operand
2024 // if there is one; use the location of the PHI otherwise.
2025 DbgLoc = PN->getDebugLoc();
2026 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2027 DbgLoc = Inst->getDebugLoc();
2028
2029 unsigned Reg = getRegForValue(PHIOp);
2030 if (!Reg) {
2031 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2032 return false;
2033 }
2034 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
2035 DbgLoc = DebugLoc();
2036 }
2037 }
2038
2039 return true;
2040 }
2041
2042 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2043 assert(LI->hasOneUse() &&
2044 "tryToFoldLoad expected a LoadInst with a single use");
2045 // We know that the load has a single use, but don't know what it is. If it
2046 // isn't one of the folded instructions, then we can't succeed here. Handle
2047 // this by scanning the single-use users of the load until we get to FoldInst.
2048 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2049
2050 const Instruction *TheUser = LI->user_back();
2051 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2052 // Stay in the right block.
2053 TheUser->getParent() == FoldInst->getParent() &&
2054 --MaxUsers) { // Don't scan too far.
2055 // If there are multiple or no uses of this instruction, then bail out.
2056 if (!TheUser->hasOneUse())
2057 return false;
2058
2059 TheUser = TheUser->user_back();
2060 }
2061
2062 // If we didn't find the fold instruction, then we failed to collapse the
2063 // sequence.
2064 if (TheUser != FoldInst)
2065 return false;
2066
2067 // Don't try to fold volatile loads. Target has to deal with alignment
2068 // constraints.
2069 if (LI->isVolatile())
2070 return false;
2071
2072 // Figure out which vreg this is going into. If there is no assigned vreg yet,
2073 // then there actually was no reference to it. Perhaps the load is referenced
2074 // by a dead instruction.
2075 unsigned LoadReg = getRegForValue(LI);
2076 if (!LoadReg)
2077 return false;
2078
2079 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2080 // may mean that the instruction got lowered to multiple MIs, or the use of
2081 // the loaded value ended up being multiple operands of the result.
2082 if (!MRI.hasOneUse(LoadReg))
2083 return false;
2084
2085 MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2086 MachineInstr *User = RI->getParent();
2087
2088 // Set the insertion point properly. Folding the load can cause generation of
2089 // other random instructions (like sign extends) for addressing modes; make
2090 // sure they get inserted in a logical place before the new instruction.
2091 FuncInfo.InsertPt = User;
2092 FuncInfo.MBB = User->getParent();
2093
2094 // Ask the target to try folding the load.
2095 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2096 }
2097
2098 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2099 // Must be an add.
2100 if (!isa<AddOperator>(Add))
2101 return false;
2102 // Type size needs to match.
2103 if (DL.getTypeSizeInBits(GEP->getType()) !=
2104 DL.getTypeSizeInBits(Add->getType()))
2105 return false;
2106 // Must be in the same basic block.
2107 if (isa<Instruction>(Add) &&
2108 FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2109 return false;
2110 // Must have a constant operand.
2111 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2112 }
2113
2114 MachineMemOperand *
2115 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2116 const Value *Ptr;
2117 Type *ValTy;
2118 unsigned Alignment;
2119 unsigned Flags;
2120 bool IsVolatile;
2121
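// Only loads and stores are handled; gather the pointer, value type,
// alignment, volatility and access kind from the instruction.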
2122 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2123 Alignment = LI->getAlignment();
2124 IsVolatile = LI->isVolatile();
2125 Flags = MachineMemOperand::MOLoad;
2126 Ptr = LI->getPointerOperand();
2127 ValTy = LI->getType();
2128 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2129 Alignment = SI->getAlignment();
2130 IsVolatile = SI->isVolatile();
2131 Flags = MachineMemOperand::MOStore;
2132 Ptr = SI->getPointerOperand();
2133 ValTy = SI->getValueOperand()->getType();
2134 } else
2135 return nullptr;
2136
2137 bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2138 bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2139 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2140
2141 AAMDNodes AAInfo;
2142 I->getAAMetadata(AAInfo);
2143
2144 if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2145 Alignment = DL.getABITypeAlignment(ValTy);
2146
2147 unsigned Size = DL.getTypeStoreSize(ValTy);
2148
2149 if (IsVolatile)
2150 Flags |= MachineMemOperand::MOVolatile;
2151 if (IsNonTemporal)
2152 Flags |= MachineMemOperand::MONonTemporal;
2153 if (IsInvariant)
2154 Flags |= MachineMemOperand::MOInvariant;
2155
2156 return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2157 Alignment, AAInfo, Ranges);
2158 }
2159
2160 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2161 // If both operands are the same, then try to optimize or fold the cmp.
2162 CmpInst::Predicate Predicate = CI->getPredicate();
2163 if (CI->getOperand(0) != CI->getOperand(1))
2164 return Predicate;
2165
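// With identical operands, every FP predicate reduces to FALSE, TRUE, ORD or
// UNO (the result depends only on whether the operand is a NaN), and every
// integer predicate folds to a constant.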
2166 switch (Predicate) {
2167 default: llvm_unreachable("Invalid predicate!");
2168 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2169 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2170 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2171 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2172 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2173 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2174 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2175 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2176 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2177 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2178 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2179 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2180 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2181 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2182 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2183 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2184
2185 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2186 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2187 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2188 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2189 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2190 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2191 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2192 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2193 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2194 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2195 }
2196
2197 return Predicate;
2198 }
2199