1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines a DAG pattern matching instruction selector for X86,
11 // converting from a legalized dag to a X86 dag.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "X86.h"
16 #include "X86InstrBuilder.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86RegisterInfo.h"
19 #include "X86Subtarget.h"
20 #include "X86TargetMachine.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/Instructions.h"
29 #include "llvm/IR/Intrinsics.h"
30 #include "llvm/IR/Type.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
37 #include <stdint.h>
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "x86-isel"
41 
42 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
43 
44 //===----------------------------------------------------------------------===//
45 //                      Pattern Matcher Implementation
46 //===----------------------------------------------------------------------===//
47 
48 namespace {
49   /// This corresponds to X86AddressMode, but uses SDValue's instead of register
50   /// numbers for the leaves of the matched tree.
51   struct X86ISelAddressMode {
52     enum {
53       RegBase,
54       FrameIndexBase
55     } BaseType;
56 
57     // This is really a union, discriminated by BaseType!
58     SDValue Base_Reg;
59     int Base_FrameIndex;
60 
61     unsigned Scale;
62     SDValue IndexReg;
63     int32_t Disp;
64     SDValue Segment;
65     const GlobalValue *GV;
66     const Constant *CP;
67     const BlockAddress *BlockAddr;
68     const char *ES;
69     MCSymbol *MCSym;
70     int JT;
71     unsigned Align;    // CP alignment.
72     unsigned char SymbolFlags;  // X86II::MO_*
73 
X86ISelAddressMode__anonfec1f42b0111::X86ISelAddressMode74     X86ISelAddressMode()
75         : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
76           Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
77           MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
78 
hasSymbolicDisplacement__anonfec1f42b0111::X86ISelAddressMode79     bool hasSymbolicDisplacement() const {
80       return GV != nullptr || CP != nullptr || ES != nullptr ||
81              MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
82     }
83 
hasBaseOrIndexReg__anonfec1f42b0111::X86ISelAddressMode84     bool hasBaseOrIndexReg() const {
85       return BaseType == FrameIndexBase ||
86              IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
87     }
88 
89     /// Return true if this addressing mode is already RIP-relative.
isRIPRelative__anonfec1f42b0111::X86ISelAddressMode90     bool isRIPRelative() const {
91       if (BaseType != RegBase) return false;
92       if (RegisterSDNode *RegNode =
93             dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
94         return RegNode->getReg() == X86::RIP;
95       return false;
96     }
97 
setBaseReg__anonfec1f42b0111::X86ISelAddressMode98     void setBaseReg(SDValue Reg) {
99       BaseType = RegBase;
100       Base_Reg = Reg;
101     }
102 
103 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dump__anonfec1f42b0111::X86ISelAddressMode104     void dump() {
105       dbgs() << "X86ISelAddressMode " << this << '\n';
106       dbgs() << "Base_Reg ";
107       if (Base_Reg.getNode())
108         Base_Reg.getNode()->dump();
109       else
110         dbgs() << "nul";
111       dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
112              << " Scale" << Scale << '\n'
113              << "IndexReg ";
114       if (IndexReg.getNode())
115         IndexReg.getNode()->dump();
116       else
117         dbgs() << "nul";
118       dbgs() << " Disp " << Disp << '\n'
119              << "GV ";
120       if (GV)
121         GV->dump();
122       else
123         dbgs() << "nul";
124       dbgs() << " CP ";
125       if (CP)
126         CP->dump();
127       else
128         dbgs() << "nul";
129       dbgs() << '\n'
130              << "ES ";
131       if (ES)
132         dbgs() << ES;
133       else
134         dbgs() << "nul";
135       dbgs() << " MCSym ";
136       if (MCSym)
137         dbgs() << MCSym;
138       else
139         dbgs() << "nul";
140       dbgs() << " JT" << JT << " Align" << Align << '\n';
141     }
142 #endif
143   };
144 }
145 
146 namespace {
147   //===--------------------------------------------------------------------===//
148   /// ISel - X86-specific code to select X86 machine instructions for
149   /// SelectionDAG operations.
150   ///
151   class X86DAGToDAGISel final : public SelectionDAGISel {
152     /// Keep a pointer to the X86Subtarget around so that we can
153     /// make the right decision when generating code for different targets.
154     const X86Subtarget *Subtarget;
155 
156     /// If true, selector should try to optimize for code size instead of
157     /// performance.
158     bool OptForSize;
159 
160     /// If true, selector should try to optimize for minimum code size.
161     bool OptForMinSize;
162 
163   public:
X86DAGToDAGISel(X86TargetMachine & tm,CodeGenOpt::Level OptLevel)164     explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
165         : SelectionDAGISel(tm, OptLevel), OptForSize(false),
166           OptForMinSize(false) {}
167 
getPassName() const168     const char *getPassName() const override {
169       return "X86 DAG->DAG Instruction Selection";
170     }
171 
runOnMachineFunction(MachineFunction & MF)172     bool runOnMachineFunction(MachineFunction &MF) override {
173       // Reset the subtarget each time through.
174       Subtarget = &MF.getSubtarget<X86Subtarget>();
175       SelectionDAGISel::runOnMachineFunction(MF);
176       return true;
177     }
178 
179     void EmitFunctionEntryCode() override;
180 
181     bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
182 
183     void PreprocessISelDAG() override;
184 
immSext8(SDNode * N) const185     inline bool immSext8(SDNode *N) const {
186       return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
187     }
188 
189     // True if the 64-bit immediate fits in a 32-bit sign-extended field.
i64immSExt32(SDNode * N) const190     inline bool i64immSExt32(SDNode *N) const {
191       uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
192       return (int64_t)v == (int32_t)v;
193     }
194 
195 // Include the pieces autogenerated from the target description.
196 #include "X86GenDAGISel.inc"
197 
198   private:
199     void Select(SDNode *N) override;
200     bool tryGather(SDNode *N, unsigned Opc);
201 
202     bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
203     bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
204     bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
205     bool matchAddress(SDValue N, X86ISelAddressMode &AM);
206     bool matchAdd(SDValue N, X86ISelAddressMode &AM, unsigned Depth);
207     bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
208                                  unsigned Depth);
209     bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
210     bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
211                     SDValue &Scale, SDValue &Index, SDValue &Disp,
212                     SDValue &Segment);
213     bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
214                           SDValue &Scale, SDValue &Index, SDValue &Disp,
215                           SDValue &Segment);
216     bool selectMOV64Imm32(SDValue N, SDValue &Imm);
217     bool selectLEAAddr(SDValue N, SDValue &Base,
218                        SDValue &Scale, SDValue &Index, SDValue &Disp,
219                        SDValue &Segment);
220     bool selectLEA64_32Addr(SDValue N, SDValue &Base,
221                             SDValue &Scale, SDValue &Index, SDValue &Disp,
222                             SDValue &Segment);
223     bool selectTLSADDRAddr(SDValue N, SDValue &Base,
224                            SDValue &Scale, SDValue &Index, SDValue &Disp,
225                            SDValue &Segment);
226     bool selectScalarSSELoad(SDNode *Root, SDValue N,
227                              SDValue &Base, SDValue &Scale,
228                              SDValue &Index, SDValue &Disp,
229                              SDValue &Segment,
230                              SDValue &NodeWithChain);
231 
232     bool tryFoldLoad(SDNode *P, SDValue N,
233                      SDValue &Base, SDValue &Scale,
234                      SDValue &Index, SDValue &Disp,
235                      SDValue &Segment);
236 
237     /// Implement addressing mode selection for inline asm expressions.
238     bool SelectInlineAsmMemoryOperand(const SDValue &Op,
239                                       unsigned ConstraintID,
240                                       std::vector<SDValue> &OutOps) override;
241 
242     void emitSpecialCodeForMain();
243 
getAddressOperands(X86ISelAddressMode & AM,const SDLoc & DL,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)244     inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
245                                    SDValue &Base, SDValue &Scale,
246                                    SDValue &Index, SDValue &Disp,
247                                    SDValue &Segment) {
248       Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
249                  ? CurDAG->getTargetFrameIndex(
250                        AM.Base_FrameIndex,
251                        TLI->getPointerTy(CurDAG->getDataLayout()))
252                  : AM.Base_Reg;
253       Scale = getI8Imm(AM.Scale, DL);
254       Index = AM.IndexReg;
255       // These are 32-bit even in 64-bit mode since RIP-relative offset
256       // is 32-bit.
257       if (AM.GV)
258         Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
259                                               MVT::i32, AM.Disp,
260                                               AM.SymbolFlags);
261       else if (AM.CP)
262         Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
263                                              AM.Align, AM.Disp, AM.SymbolFlags);
264       else if (AM.ES) {
265         assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
266         Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
267       } else if (AM.MCSym) {
268         assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
269         assert(AM.SymbolFlags == 0 && "oo");
270         Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
271       } else if (AM.JT != -1) {
272         assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
273         Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
274       } else if (AM.BlockAddr)
275         Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
276                                              AM.SymbolFlags);
277       else
278         Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
279 
280       if (AM.Segment.getNode())
281         Segment = AM.Segment;
282       else
283         Segment = CurDAG->getRegister(0, MVT::i32);
284     }
285 
286     // Utility function to determine whether we should avoid selecting
287     // immediate forms of instructions for better code size or not.
288     // At a high level, we'd like to avoid such instructions when
289     // we have similar constants used within the same basic block
290     // that can be kept in a register.
291     //
shouldAvoidImmediateInstFormsForSize(SDNode * N) const292     bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
293       uint32_t UseCount = 0;
294 
295       // Do not want to hoist if we're not optimizing for size.
296       // TODO: We'd like to remove this restriction.
297       // See the comment in X86InstrInfo.td for more info.
298       if (!OptForSize)
299         return false;
300 
301       // Walk all the users of the immediate.
302       for (SDNode::use_iterator UI = N->use_begin(),
303            UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
304 
305         SDNode *User = *UI;
306 
307         // This user is already selected. Count it as a legitimate use and
308         // move on.
309         if (User->isMachineOpcode()) {
310           UseCount++;
311           continue;
312         }
313 
314         // We want to count stores of immediates as real uses.
315         if (User->getOpcode() == ISD::STORE &&
316             User->getOperand(1).getNode() == N) {
317           UseCount++;
318           continue;
319         }
320 
321         // We don't currently match users that have > 2 operands (except
322         // for stores, which are handled above)
323         // Those instruction won't match in ISEL, for now, and would
324         // be counted incorrectly.
325         // This may change in the future as we add additional instruction
326         // types.
327         if (User->getNumOperands() != 2)
328           continue;
329 
330         // Immediates that are used for offsets as part of stack
331         // manipulation should be left alone. These are typically
332         // used to indicate SP offsets for argument passing and
333         // will get pulled into stores/pushes (implicitly).
334         if (User->getOpcode() == X86ISD::ADD ||
335             User->getOpcode() == ISD::ADD    ||
336             User->getOpcode() == X86ISD::SUB ||
337             User->getOpcode() == ISD::SUB) {
338 
339           // Find the other operand of the add/sub.
340           SDValue OtherOp = User->getOperand(0);
341           if (OtherOp.getNode() == N)
342             OtherOp = User->getOperand(1);
343 
344           // Don't count if the other operand is SP.
345           RegisterSDNode *RegNode;
346           if (OtherOp->getOpcode() == ISD::CopyFromReg &&
347               (RegNode = dyn_cast_or_null<RegisterSDNode>(
348                  OtherOp->getOperand(1).getNode())))
349             if ((RegNode->getReg() == X86::ESP) ||
350                 (RegNode->getReg() == X86::RSP))
351               continue;
352         }
353 
354         // ... otherwise, count this and move on.
355         UseCount++;
356       }
357 
358       // If we have more than 1 use, then recommend for hoisting.
359       return (UseCount > 1);
360     }
361 
362     /// Return a target constant with the specified value of type i8.
getI8Imm(unsigned Imm,const SDLoc & DL)363     inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
364       return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
365     }
366 
367     /// Return a target constant with the specified value, of type i32.
getI32Imm(unsigned Imm,const SDLoc & DL)368     inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
369       return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
370     }
371 
372     /// Return an SDNode that returns the value of the global base register.
373     /// Output instructions required to initialize the global base register,
374     /// if necessary.
375     SDNode *getGlobalBaseReg();
376 
377     /// Return a reference to the TargetMachine, casted to the target-specific
378     /// type.
getTargetMachine() const379     const X86TargetMachine &getTargetMachine() const {
380       return static_cast<const X86TargetMachine &>(TM);
381     }
382 
383     /// Return a reference to the TargetInstrInfo, casted to the target-specific
384     /// type.
getInstrInfo() const385     const X86InstrInfo *getInstrInfo() const {
386       return Subtarget->getInstrInfo();
387     }
388 
389     /// \brief Address-mode matching performs shift-of-and to and-of-shift
390     /// reassociation in order to expose more scaled addressing
391     /// opportunities.
ComplexPatternFuncMutatesDAG() const392     bool ComplexPatternFuncMutatesDAG() const override {
393       return true;
394     }
395   };
396 }
397 
398 
399 bool
IsProfitableToFold(SDValue N,SDNode * U,SDNode * Root) const400 X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
401   if (OptLevel == CodeGenOpt::None) return false;
402 
403   if (!N.hasOneUse())
404     return false;
405 
406   if (N.getOpcode() != ISD::LOAD)
407     return true;
408 
409   // If N is a load, do additional profitability checks.
410   if (U == Root) {
411     switch (U->getOpcode()) {
412     default: break;
413     case X86ISD::ADD:
414     case X86ISD::SUB:
415     case X86ISD::AND:
416     case X86ISD::XOR:
417     case X86ISD::OR:
418     case ISD::ADD:
419     case ISD::ADDC:
420     case ISD::ADDE:
421     case ISD::AND:
422     case ISD::OR:
423     case ISD::XOR: {
424       SDValue Op1 = U->getOperand(1);
425 
426       // If the other operand is a 8-bit immediate we should fold the immediate
427       // instead. This reduces code size.
428       // e.g.
429       // movl 4(%esp), %eax
430       // addl $4, %eax
431       // vs.
432       // movl $4, %eax
433       // addl 4(%esp), %eax
434       // The former is 2 bytes shorter. In case where the increment is 1, then
435       // the saving can be 4 bytes (by using incl %eax).
436       if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
437         if (Imm->getAPIntValue().isSignedIntN(8))
438           return false;
439 
440       // If the other operand is a TLS address, we should fold it instead.
441       // This produces
442       // movl    %gs:0, %eax
443       // leal    i@NTPOFF(%eax), %eax
444       // instead of
445       // movl    $i@NTPOFF, %eax
446       // addl    %gs:0, %eax
447       // if the block also has an access to a second TLS address this will save
448       // a load.
449       // FIXME: This is probably also true for non-TLS addresses.
450       if (Op1.getOpcode() == X86ISD::Wrapper) {
451         SDValue Val = Op1.getOperand(0);
452         if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
453           return false;
454       }
455     }
456     }
457   }
458 
459   return true;
460 }
461 
462 /// Replace the original chain operand of the call with
463 /// load's chain operand and move load below the call's chain operand.
moveBelowOrigChain(SelectionDAG * CurDAG,SDValue Load,SDValue Call,SDValue OrigChain)464 static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
465                                SDValue Call, SDValue OrigChain) {
466   SmallVector<SDValue, 8> Ops;
467   SDValue Chain = OrigChain.getOperand(0);
468   if (Chain.getNode() == Load.getNode())
469     Ops.push_back(Load.getOperand(0));
470   else {
471     assert(Chain.getOpcode() == ISD::TokenFactor &&
472            "Unexpected chain operand");
473     for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
474       if (Chain.getOperand(i).getNode() == Load.getNode())
475         Ops.push_back(Load.getOperand(0));
476       else
477         Ops.push_back(Chain.getOperand(i));
478     SDValue NewChain =
479       CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
480     Ops.clear();
481     Ops.push_back(NewChain);
482   }
483   Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
484   CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
485   CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
486                              Load.getOperand(1), Load.getOperand(2));
487 
488   Ops.clear();
489   Ops.push_back(SDValue(Load.getNode(), 1));
490   Ops.append(Call->op_begin() + 1, Call->op_end());
491   CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
492 }
493 
494 /// Return true if call address is a load and it can be
495 /// moved below CALLSEQ_START and the chains leading up to the call.
496 /// Return the CALLSEQ_START by reference as a second output.
497 /// In the case of a tail call, there isn't a callseq node between the call
498 /// chain and the load.
isCalleeLoad(SDValue Callee,SDValue & Chain,bool HasCallSeq)499 static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
500   // The transformation is somewhat dangerous if the call's chain was glued to
501   // the call. After MoveBelowOrigChain the load is moved between the call and
502   // the chain, this can create a cycle if the load is not folded. So it is
503   // *really* important that we are sure the load will be folded.
504   if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
505     return false;
506   LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
507   if (!LD ||
508       LD->isVolatile() ||
509       LD->getAddressingMode() != ISD::UNINDEXED ||
510       LD->getExtensionType() != ISD::NON_EXTLOAD)
511     return false;
512 
513   // Now let's find the callseq_start.
514   while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
515     if (!Chain.hasOneUse())
516       return false;
517     Chain = Chain.getOperand(0);
518   }
519 
520   if (!Chain.getNumOperands())
521     return false;
522   // Since we are not checking for AA here, conservatively abort if the chain
523   // writes to memory. It's not safe to move the callee (a load) across a store.
524   if (isa<MemSDNode>(Chain.getNode()) &&
525       cast<MemSDNode>(Chain.getNode())->writeMem())
526     return false;
527   if (Chain.getOperand(0).getNode() == Callee.getNode())
528     return true;
529   if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
530       Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
531       Callee.getValue(1).hasOneUse())
532     return true;
533   return false;
534 }
535 
PreprocessISelDAG()536 void X86DAGToDAGISel::PreprocessISelDAG() {
537   // OptFor[Min]Size are used in pattern predicates that isel is matching.
538   OptForSize = MF->getFunction()->optForSize();
539   OptForMinSize = MF->getFunction()->optForMinSize();
540   assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
541 
542   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
543        E = CurDAG->allnodes_end(); I != E; ) {
544     SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
545 
546     if (OptLevel != CodeGenOpt::None &&
547         // Only does this when target favors doesn't favor register indirect
548         // call.
549         ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
550          (N->getOpcode() == X86ISD::TC_RETURN &&
551           // Only does this if load can be folded into TC_RETURN.
552           (Subtarget->is64Bit() ||
553            !getTargetMachine().isPositionIndependent())))) {
554       /// Also try moving call address load from outside callseq_start to just
555       /// before the call to allow it to be folded.
556       ///
557       ///     [Load chain]
558       ///         ^
559       ///         |
560       ///       [Load]
561       ///       ^    ^
562       ///       |    |
563       ///      /      \--
564       ///     /          |
565       ///[CALLSEQ_START] |
566       ///     ^          |
567       ///     |          |
568       /// [LOAD/C2Reg]   |
569       ///     |          |
570       ///      \        /
571       ///       \      /
572       ///       [CALL]
573       bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
574       SDValue Chain = N->getOperand(0);
575       SDValue Load  = N->getOperand(1);
576       if (!isCalleeLoad(Load, Chain, HasCallSeq))
577         continue;
578       moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
579       ++NumLoadMoved;
580       continue;
581     }
582 
583     // Lower fpround and fpextend nodes that target the FP stack to be store and
584     // load to the stack.  This is a gross hack.  We would like to simply mark
585     // these as being illegal, but when we do that, legalize produces these when
586     // it expands calls, then expands these in the same legalize pass.  We would
587     // like dag combine to be able to hack on these between the call expansion
588     // and the node legalization.  As such this pass basically does "really
589     // late" legalization of these inline with the X86 isel pass.
590     // FIXME: This should only happen when not compiled with -O0.
591     if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
592       continue;
593 
594     MVT SrcVT = N->getOperand(0).getSimpleValueType();
595     MVT DstVT = N->getSimpleValueType(0);
596 
597     // If any of the sources are vectors, no fp stack involved.
598     if (SrcVT.isVector() || DstVT.isVector())
599       continue;
600 
601     // If the source and destination are SSE registers, then this is a legal
602     // conversion that should not be lowered.
603     const X86TargetLowering *X86Lowering =
604         static_cast<const X86TargetLowering *>(TLI);
605     bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
606     bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
607     if (SrcIsSSE && DstIsSSE)
608       continue;
609 
610     if (!SrcIsSSE && !DstIsSSE) {
611       // If this is an FPStack extension, it is a noop.
612       if (N->getOpcode() == ISD::FP_EXTEND)
613         continue;
614       // If this is a value-preserving FPStack truncation, it is a noop.
615       if (N->getConstantOperandVal(1))
616         continue;
617     }
618 
619     // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
620     // FPStack has extload and truncstore.  SSE can fold direct loads into other
621     // operations.  Based on this, decide what we want to do.
622     MVT MemVT;
623     if (N->getOpcode() == ISD::FP_ROUND)
624       MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
625     else
626       MemVT = SrcIsSSE ? SrcVT : DstVT;
627 
628     SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
629     SDLoc dl(N);
630 
631     // FIXME: optimize the case where the src/dest is a load or store?
632     SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
633                                           N->getOperand(0),
634                                           MemTmp, MachinePointerInfo(), MemVT,
635                                           false, false, 0);
636     SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
637                                         MachinePointerInfo(),
638                                         MemVT, false, false, false, 0);
639 
640     // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
641     // extload we created.  This will cause general havok on the dag because
642     // anything below the conversion could be folded into other existing nodes.
643     // To avoid invalidating 'I', back it up to the convert node.
644     --I;
645     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
646 
647     // Now that we did that, the node is dead.  Increment the iterator to the
648     // next node to process, then delete N.
649     ++I;
650     CurDAG->DeleteNode(N);
651   }
652 }
653 
654 
655 /// Emit any code that needs to be executed only in the main function.
emitSpecialCodeForMain()656 void X86DAGToDAGISel::emitSpecialCodeForMain() {
657   if (Subtarget->isTargetCygMing()) {
658     TargetLowering::ArgListTy Args;
659     auto &DL = CurDAG->getDataLayout();
660 
661     TargetLowering::CallLoweringInfo CLI(*CurDAG);
662     CLI.setChain(CurDAG->getRoot())
663         .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
664                    CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
665                    std::move(Args));
666     const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
667     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
668     CurDAG->setRoot(Result.second);
669   }
670 }
671 
EmitFunctionEntryCode()672 void X86DAGToDAGISel::EmitFunctionEntryCode() {
673   // If this is main, emit special code for main.
674   if (const Function *Fn = MF->getFunction())
675     if (Fn->hasExternalLinkage() && Fn->getName() == "main")
676       emitSpecialCodeForMain();
677 }
678 
isDispSafeForFrameIndex(int64_t Val)679 static bool isDispSafeForFrameIndex(int64_t Val) {
680   // On 64-bit platforms, we can run into an issue where a frame index
681   // includes a displacement that, when added to the explicit displacement,
682   // will overflow the displacement field. Assuming that the frame index
683   // displacement fits into a 31-bit integer  (which is only slightly more
684   // aggressive than the current fundamental assumption that it fits into
685   // a 32-bit integer), a 31-bit disp should always be safe.
686   return isInt<31>(Val);
687 }
688 
foldOffsetIntoAddress(uint64_t Offset,X86ISelAddressMode & AM)689 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
690                                             X86ISelAddressMode &AM) {
691   // Cannot combine ExternalSymbol displacements with integer offsets.
692   if (Offset != 0 && (AM.ES || AM.MCSym))
693     return true;
694   int64_t Val = AM.Disp + Offset;
695   CodeModel::Model M = TM.getCodeModel();
696   if (Subtarget->is64Bit()) {
697     if (!X86::isOffsetSuitableForCodeModel(Val, M,
698                                            AM.hasSymbolicDisplacement()))
699       return true;
700     // In addition to the checks required for a register base, check that
701     // we do not try to use an unsafe Disp with a frame index.
702     if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
703         !isDispSafeForFrameIndex(Val))
704       return true;
705   }
706   AM.Disp = Val;
707   return false;
708 
709 }
710 
matchLoadInAddress(LoadSDNode * N,X86ISelAddressMode & AM)711 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
712   SDValue Address = N->getOperand(1);
713 
714   // load gs:0 -> GS segment register.
715   // load fs:0 -> FS segment register.
716   //
717   // This optimization is valid because the GNU TLS model defines that
718   // gs:0 (or fs:0 on X86-64) contains its own address.
719   // For more information see http://people.redhat.com/drepper/tls.pdf
720   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
721     if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
722         Subtarget->isTargetGlibc())
723       switch (N->getPointerInfo().getAddrSpace()) {
724       case 256:
725         AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
726         return false;
727       case 257:
728         AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
729         return false;
730       // Address space 258 is not handled here, because it is not used to
731       // address TLS areas.
732       }
733 
734   return true;
735 }
736 
737 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
738 /// mode. These wrap things that will resolve down into a symbol reference.
739 /// If no match is possible, this returns true, otherwise it returns false.
matchWrapper(SDValue N,X86ISelAddressMode & AM)740 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
741   // If the addressing mode already has a symbol as the displacement, we can
742   // never match another symbol.
743   if (AM.hasSymbolicDisplacement())
744     return true;
745 
746   SDValue N0 = N.getOperand(0);
747   CodeModel::Model M = TM.getCodeModel();
748 
749   // Handle X86-64 rip-relative addresses.  We check this before checking direct
750   // folding because RIP is preferable to non-RIP accesses.
751   if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
752       // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
753       // they cannot be folded into immediate fields.
754       // FIXME: This can be improved for kernel and other models?
755       (M == CodeModel::Small || M == CodeModel::Kernel)) {
756     // Base and index reg must be 0 in order to use %rip as base.
757     if (AM.hasBaseOrIndexReg())
758       return true;
759     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
760       X86ISelAddressMode Backup = AM;
761       AM.GV = G->getGlobal();
762       AM.SymbolFlags = G->getTargetFlags();
763       if (foldOffsetIntoAddress(G->getOffset(), AM)) {
764         AM = Backup;
765         return true;
766       }
767     } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
768       X86ISelAddressMode Backup = AM;
769       AM.CP = CP->getConstVal();
770       AM.Align = CP->getAlignment();
771       AM.SymbolFlags = CP->getTargetFlags();
772       if (foldOffsetIntoAddress(CP->getOffset(), AM)) {
773         AM = Backup;
774         return true;
775       }
776     } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
777       AM.ES = S->getSymbol();
778       AM.SymbolFlags = S->getTargetFlags();
779     } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
780       AM.MCSym = S->getMCSymbol();
781     } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
782       AM.JT = J->getIndex();
783       AM.SymbolFlags = J->getTargetFlags();
784     } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
785       X86ISelAddressMode Backup = AM;
786       AM.BlockAddr = BA->getBlockAddress();
787       AM.SymbolFlags = BA->getTargetFlags();
788       if (foldOffsetIntoAddress(BA->getOffset(), AM)) {
789         AM = Backup;
790         return true;
791       }
792     } else
793       llvm_unreachable("Unhandled symbol reference node.");
794 
795     if (N.getOpcode() == X86ISD::WrapperRIP)
796       AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
797     return false;
798   }
799 
800   // Handle the case when globals fit in our immediate field: This is true for
801   // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
802   // mode, this only applies to a non-RIP-relative computation.
803   if (!Subtarget->is64Bit() ||
804       M == CodeModel::Small || M == CodeModel::Kernel) {
805     assert(N.getOpcode() != X86ISD::WrapperRIP &&
806            "RIP-relative addressing already handled");
807     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
808       AM.GV = G->getGlobal();
809       AM.Disp += G->getOffset();
810       AM.SymbolFlags = G->getTargetFlags();
811     } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
812       AM.CP = CP->getConstVal();
813       AM.Align = CP->getAlignment();
814       AM.Disp += CP->getOffset();
815       AM.SymbolFlags = CP->getTargetFlags();
816     } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
817       AM.ES = S->getSymbol();
818       AM.SymbolFlags = S->getTargetFlags();
819     } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
820       AM.MCSym = S->getMCSymbol();
821     } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
822       AM.JT = J->getIndex();
823       AM.SymbolFlags = J->getTargetFlags();
824     } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
825       AM.BlockAddr = BA->getBlockAddress();
826       AM.Disp += BA->getOffset();
827       AM.SymbolFlags = BA->getTargetFlags();
828     } else
829       llvm_unreachable("Unhandled symbol reference node.");
830     return false;
831   }
832 
833   return true;
834 }
835 
836 /// Add the specified node to the specified addressing mode, returning true if
837 /// it cannot be done. This just pattern matches for the addressing mode.
matchAddress(SDValue N,X86ISelAddressMode & AM)838 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
839   if (matchAddressRecursively(N, AM, 0))
840     return true;
841 
842   // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
843   // a smaller encoding and avoids a scaled-index.
844   if (AM.Scale == 2 &&
845       AM.BaseType == X86ISelAddressMode::RegBase &&
846       AM.Base_Reg.getNode() == nullptr) {
847     AM.Base_Reg = AM.IndexReg;
848     AM.Scale = 1;
849   }
850 
851   // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
852   // because it has a smaller encoding.
853   // TODO: Which other code models can use this?
854   if (TM.getCodeModel() == CodeModel::Small &&
855       Subtarget->is64Bit() &&
856       AM.Scale == 1 &&
857       AM.BaseType == X86ISelAddressMode::RegBase &&
858       AM.Base_Reg.getNode() == nullptr &&
859       AM.IndexReg.getNode() == nullptr &&
860       AM.SymbolFlags == X86II::MO_NO_FLAG &&
861       AM.hasSymbolicDisplacement())
862     AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
863 
864   return false;
865 }
866 
matchAdd(SDValue N,X86ISelAddressMode & AM,unsigned Depth)867 bool X86DAGToDAGISel::matchAdd(SDValue N, X86ISelAddressMode &AM,
868                                unsigned Depth) {
869   // Add an artificial use to this node so that we can keep track of
870   // it if it gets CSE'd with a different node.
871   HandleSDNode Handle(N);
872 
873   X86ISelAddressMode Backup = AM;
874   if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
875       !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
876     return false;
877   AM = Backup;
878 
879   // Try again after commuting the operands.
880   if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
881       !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
882     return false;
883   AM = Backup;
884 
885   // If we couldn't fold both operands into the address at the same time,
886   // see if we can just put each operand into a register and fold at least
887   // the add.
888   if (AM.BaseType == X86ISelAddressMode::RegBase &&
889       !AM.Base_Reg.getNode() &&
890       !AM.IndexReg.getNode()) {
891     N = Handle.getValue();
892     AM.Base_Reg = N.getOperand(0);
893     AM.IndexReg = N.getOperand(1);
894     AM.Scale = 1;
895     return false;
896   }
897   N = Handle.getValue();
898   return true;
899 }
900 
901 // Insert a node into the DAG at least before the Pos node's position. This
902 // will reposition the node as needed, and will assign it a node ID that is <=
903 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
904 // IDs! The selection DAG must no longer depend on their uniqueness when this
905 // is used.
insertDAGNode(SelectionDAG & DAG,SDValue Pos,SDValue N)906 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
907   if (N.getNode()->getNodeId() == -1 ||
908       N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
909     DAG.RepositionNode(Pos.getNode()->getIterator(), N.getNode());
910     N.getNode()->setNodeId(Pos.getNode()->getNodeId());
911   }
912 }
913 
914 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
915 // safe. This allows us to convert the shift and and into an h-register
916 // extract and a scaled index. Returns false if the simplification is
917 // performed.
foldMaskAndShiftToExtract(SelectionDAG & DAG,SDValue N,uint64_t Mask,SDValue Shift,SDValue X,X86ISelAddressMode & AM)918 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
919                                       uint64_t Mask,
920                                       SDValue Shift, SDValue X,
921                                       X86ISelAddressMode &AM) {
922   if (Shift.getOpcode() != ISD::SRL ||
923       !isa<ConstantSDNode>(Shift.getOperand(1)) ||
924       !Shift.hasOneUse())
925     return true;
926 
927   int ScaleLog = 8 - Shift.getConstantOperandVal(1);
928   if (ScaleLog <= 0 || ScaleLog >= 4 ||
929       Mask != (0xffu << ScaleLog))
930     return true;
931 
932   MVT VT = N.getSimpleValueType();
933   SDLoc DL(N);
934   SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
935   SDValue NewMask = DAG.getConstant(0xff, DL, VT);
936   SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
937   SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
938   SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
939   SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
940 
941   // Insert the new nodes into the topological ordering. We must do this in
942   // a valid topological ordering as nothing is going to go back and re-sort
943   // these nodes. We continually insert before 'N' in sequence as this is
944   // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
945   // hierarchy left to express.
946   insertDAGNode(DAG, N, Eight);
947   insertDAGNode(DAG, N, Srl);
948   insertDAGNode(DAG, N, NewMask);
949   insertDAGNode(DAG, N, And);
950   insertDAGNode(DAG, N, ShlCount);
951   insertDAGNode(DAG, N, Shl);
952   DAG.ReplaceAllUsesWith(N, Shl);
953   AM.IndexReg = And;
954   AM.Scale = (1 << ScaleLog);
955   return false;
956 }
957 
958 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
959 // allows us to fold the shift into this addressing mode. Returns false if the
960 // transform succeeded.
foldMaskedShiftToScaledMask(SelectionDAG & DAG,SDValue N,uint64_t Mask,SDValue Shift,SDValue X,X86ISelAddressMode & AM)961 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
962                                         uint64_t Mask,
963                                         SDValue Shift, SDValue X,
964                                         X86ISelAddressMode &AM) {
965   if (Shift.getOpcode() != ISD::SHL ||
966       !isa<ConstantSDNode>(Shift.getOperand(1)))
967     return true;
968 
969   // Not likely to be profitable if either the AND or SHIFT node has more
970   // than one use (unless all uses are for address computation). Besides,
971   // isel mechanism requires their node ids to be reused.
972   if (!N.hasOneUse() || !Shift.hasOneUse())
973     return true;
974 
975   // Verify that the shift amount is something we can fold.
976   unsigned ShiftAmt = Shift.getConstantOperandVal(1);
977   if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
978     return true;
979 
980   MVT VT = N.getSimpleValueType();
981   SDLoc DL(N);
982   SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
983   SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
984   SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
985 
986   // Insert the new nodes into the topological ordering. We must do this in
987   // a valid topological ordering as nothing is going to go back and re-sort
988   // these nodes. We continually insert before 'N' in sequence as this is
989   // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
990   // hierarchy left to express.
991   insertDAGNode(DAG, N, NewMask);
992   insertDAGNode(DAG, N, NewAnd);
993   insertDAGNode(DAG, N, NewShift);
994   DAG.ReplaceAllUsesWith(N, NewShift);
995 
996   AM.Scale = 1 << ShiftAmt;
997   AM.IndexReg = NewAnd;
998   return false;
999 }
1000 
1001 // Implement some heroics to detect shifts of masked values where the mask can
1002 // be replaced by extending the shift and undoing that in the addressing mode
1003 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1004 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1005 // the addressing mode. This results in code such as:
1006 //
1007 //   int f(short *y, int *lookup_table) {
1008 //     ...
1009 //     return *y + lookup_table[*y >> 11];
1010 //   }
1011 //
1012 // Turning into:
1013 //   movzwl (%rdi), %eax
1014 //   movl %eax, %ecx
1015 //   shrl $11, %ecx
1016 //   addl (%rsi,%rcx,4), %eax
1017 //
1018 // Instead of:
1019 //   movzwl (%rdi), %eax
1020 //   movl %eax, %ecx
1021 //   shrl $9, %ecx
1022 //   andl $124, %rcx
1023 //   addl (%rsi,%rcx), %eax
1024 //
1025 // Note that this function assumes the mask is provided as a mask *after* the
1026 // value is shifted. The input chain may or may not match that, but computing
1027 // such a mask is trivial.
foldMaskAndShiftToScale(SelectionDAG & DAG,SDValue N,uint64_t Mask,SDValue Shift,SDValue X,X86ISelAddressMode & AM)1028 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1029                                     uint64_t Mask,
1030                                     SDValue Shift, SDValue X,
1031                                     X86ISelAddressMode &AM) {
1032   if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1033       !isa<ConstantSDNode>(Shift.getOperand(1)))
1034     return true;
1035 
1036   unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1037   unsigned MaskLZ = countLeadingZeros(Mask);
1038   unsigned MaskTZ = countTrailingZeros(Mask);
1039 
1040   // The amount of shift we're trying to fit into the addressing mode is taken
1041   // from the trailing zeros of the mask.
1042   unsigned AMShiftAmt = MaskTZ;
1043 
1044   // There is nothing we can do here unless the mask is removing some bits.
1045   // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1046   if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1047 
1048   // We also need to ensure that mask is a continuous run of bits.
1049   if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1050 
1051   // Scale the leading zero count down based on the actual size of the value.
1052   // Also scale it down based on the size of the shift.
1053   MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1054 
1055   // The final check is to ensure that any masked out high bits of X are
1056   // already known to be zero. Otherwise, the mask has a semantic impact
1057   // other than masking out a couple of low bits. Unfortunately, because of
1058   // the mask, zero extensions will be removed from operands in some cases.
1059   // This code works extra hard to look through extensions because we can
1060   // replace them with zero extensions cheaply if necessary.
1061   bool ReplacingAnyExtend = false;
1062   if (X.getOpcode() == ISD::ANY_EXTEND) {
1063     unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
1064                           X.getOperand(0).getSimpleValueType().getSizeInBits();
1065     // Assume that we'll replace the any-extend with a zero-extend, and
1066     // narrow the search to the extended value.
1067     X = X.getOperand(0);
1068     MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
1069     ReplacingAnyExtend = true;
1070   }
1071   APInt MaskedHighBits =
1072     APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
1073   APInt KnownZero, KnownOne;
1074   DAG.computeKnownBits(X, KnownZero, KnownOne);
1075   if (MaskedHighBits != KnownZero) return true;
1076 
1077   // We've identified a pattern that can be transformed into a single shift
1078   // and an addressing mode. Make it so.
1079   MVT VT = N.getSimpleValueType();
1080   if (ReplacingAnyExtend) {
1081     assert(X.getValueType() != VT);
1082     // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
1083     SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
1084     insertDAGNode(DAG, N, NewX);
1085     X = NewX;
1086   }
1087   SDLoc DL(N);
1088   SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1089   SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1090   SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1091   SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
1092 
1093   // Insert the new nodes into the topological ordering. We must do this in
1094   // a valid topological ordering as nothing is going to go back and re-sort
1095   // these nodes. We continually insert before 'N' in sequence as this is
1096   // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1097   // hierarchy left to express.
1098   insertDAGNode(DAG, N, NewSRLAmt);
1099   insertDAGNode(DAG, N, NewSRL);
1100   insertDAGNode(DAG, N, NewSHLAmt);
1101   insertDAGNode(DAG, N, NewSHL);
1102   DAG.ReplaceAllUsesWith(N, NewSHL);
1103 
1104   AM.Scale = 1 << AMShiftAmt;
1105   AM.IndexReg = NewSRL;
1106   return false;
1107 }
1108 
matchAddressRecursively(SDValue N,X86ISelAddressMode & AM,unsigned Depth)1109 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
1110                                               unsigned Depth) {
1111   SDLoc dl(N);
1112   DEBUG({
1113       dbgs() << "MatchAddress: ";
1114       AM.dump();
1115     });
1116   // Limit recursion.
1117   if (Depth > 5)
1118     return matchAddressBase(N, AM);
1119 
1120   // If this is already a %rip relative address, we can only merge immediates
1121   // into it.  Instead of handling this in every case, we handle it here.
1122   // RIP relative addressing: %rip + 32-bit displacement!
1123   if (AM.isRIPRelative()) {
1124     // FIXME: JumpTable and ExternalSymbol address currently don't like
1125     // displacements.  It isn't very important, but this should be fixed for
1126     // consistency.
1127     if (!(AM.ES || AM.MCSym) && AM.JT != -1)
1128       return true;
1129 
1130     if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
1131       if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
1132         return false;
1133     return true;
1134   }
1135 
1136   switch (N.getOpcode()) {
1137   default: break;
1138   case ISD::LOCAL_RECOVER: {
1139     if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
1140       if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
1141         // Use the symbol and don't prefix it.
1142         AM.MCSym = ESNode->getMCSymbol();
1143         return false;
1144       }
1145     break;
1146   }
1147   case ISD::Constant: {
1148     uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
1149     if (!foldOffsetIntoAddress(Val, AM))
1150       return false;
1151     break;
1152   }
1153 
1154   case X86ISD::Wrapper:
1155   case X86ISD::WrapperRIP:
1156     if (!matchWrapper(N, AM))
1157       return false;
1158     break;
1159 
1160   case ISD::LOAD:
1161     if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
1162       return false;
1163     break;
1164 
1165   case ISD::FrameIndex:
1166     if (AM.BaseType == X86ISelAddressMode::RegBase &&
1167         AM.Base_Reg.getNode() == nullptr &&
1168         (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
1169       AM.BaseType = X86ISelAddressMode::FrameIndexBase;
1170       AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1171       return false;
1172     }
1173     break;
1174 
1175   case ISD::SHL:
1176     if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
1177       break;
1178 
1179     if (ConstantSDNode
1180           *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
1181       unsigned Val = CN->getZExtValue();
1182       // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
1183       // that the base operand remains free for further matching. If
1184       // the base doesn't end up getting used, a post-processing step
1185       // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
1186       if (Val == 1 || Val == 2 || Val == 3) {
1187         AM.Scale = 1 << Val;
1188         SDValue ShVal = N.getNode()->getOperand(0);
1189 
1190         // Okay, we know that we have a scale by now.  However, if the scaled
1191         // value is an add of something and a constant, we can fold the
1192         // constant into the disp field here.
1193         if (CurDAG->isBaseWithConstantOffset(ShVal)) {
1194           AM.IndexReg = ShVal.getNode()->getOperand(0);
1195           ConstantSDNode *AddVal =
1196             cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
1197           uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
1198           if (!foldOffsetIntoAddress(Disp, AM))
1199             return false;
1200         }
1201 
1202         AM.IndexReg = ShVal;
1203         return false;
1204       }
1205     }
1206     break;
1207 
1208   case ISD::SRL: {
1209     // Scale must not be used already.
1210     if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
1211 
1212     SDValue And = N.getOperand(0);
1213     if (And.getOpcode() != ISD::AND) break;
1214     SDValue X = And.getOperand(0);
1215 
1216     // We only handle up to 64-bit values here as those are what matter for
1217     // addressing mode optimizations.
1218     if (X.getSimpleValueType().getSizeInBits() > 64) break;
1219 
1220     // The mask used for the transform is expected to be post-shift, but we
1221     // found the shift first so just apply the shift to the mask before passing
1222     // it down.
1223     if (!isa<ConstantSDNode>(N.getOperand(1)) ||
1224         !isa<ConstantSDNode>(And.getOperand(1)))
1225       break;
1226     uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
1227 
1228     // Try to fold the mask and shift into the scale, and return false if we
1229     // succeed.
1230     if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
1231       return false;
1232     break;
1233   }
1234 
1235   case ISD::SMUL_LOHI:
1236   case ISD::UMUL_LOHI:
1237     // A mul_lohi where we need the low part can be folded as a plain multiply.
1238     if (N.getResNo() != 0) break;
1239     // FALL THROUGH
1240   case ISD::MUL:
1241   case X86ISD::MUL_IMM:
1242     // X*[3,5,9] -> X+X*[2,4,8]
1243     if (AM.BaseType == X86ISelAddressMode::RegBase &&
1244         AM.Base_Reg.getNode() == nullptr &&
1245         AM.IndexReg.getNode() == nullptr) {
1246       if (ConstantSDNode
1247             *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
1248         if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
1249             CN->getZExtValue() == 9) {
1250           AM.Scale = unsigned(CN->getZExtValue())-1;
1251 
1252           SDValue MulVal = N.getNode()->getOperand(0);
1253           SDValue Reg;
1254 
1255           // Okay, we know that we have a scale by now.  However, if the scaled
1256           // value is an add of something and a constant, we can fold the
1257           // constant into the disp field here.
1258           if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1259               isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
1260             Reg = MulVal.getNode()->getOperand(0);
1261             ConstantSDNode *AddVal =
1262               cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
1263             uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
1264             if (foldOffsetIntoAddress(Disp, AM))
1265               Reg = N.getNode()->getOperand(0);
1266           } else {
1267             Reg = N.getNode()->getOperand(0);
1268           }
1269 
1270           AM.IndexReg = AM.Base_Reg = Reg;
1271           return false;
1272         }
1273     }
1274     break;
1275 
1276   case ISD::SUB: {
1277     // Given A-B, if A can be completely folded into the address and
1278     // the index field with the index field unused, use -B as the index.
1279     // This is a win if a has multiple parts that can be folded into
1280     // the address. Also, this saves a mov if the base register has
1281     // other uses, since it avoids a two-address sub instruction, however
1282     // it costs an additional mov if the index register has other uses.
1283 
1284     // Add an artificial use to this node so that we can keep track of
1285     // it if it gets CSE'd with a different node.
1286     HandleSDNode Handle(N);
1287 
1288     // Test if the LHS of the sub can be folded.
1289     X86ISelAddressMode Backup = AM;
1290     if (matchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
1291       AM = Backup;
1292       break;
1293     }
1294     // Test if the index field is free for use.
1295     if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
1296       AM = Backup;
1297       break;
1298     }
1299 
1300     int Cost = 0;
1301     SDValue RHS = Handle.getValue().getNode()->getOperand(1);
1302     // If the RHS involves a register with multiple uses, this
1303     // transformation incurs an extra mov, due to the neg instruction
1304     // clobbering its operand.
1305     if (!RHS.getNode()->hasOneUse() ||
1306         RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
1307         RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
1308         RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
1309         (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
1310          RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
1311       ++Cost;
1312     // If the base is a register with multiple uses, this
1313     // transformation may save a mov.
1314     if ((AM.BaseType == X86ISelAddressMode::RegBase &&
1315          AM.Base_Reg.getNode() &&
1316          !AM.Base_Reg.getNode()->hasOneUse()) ||
1317         AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1318       --Cost;
1319     // If the folded LHS was interesting, this transformation saves
1320     // address arithmetic.
1321     if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
1322         ((AM.Disp != 0) && (Backup.Disp == 0)) +
1323         (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
1324       --Cost;
1325     // If it doesn't look like it may be an overall win, don't do it.
1326     if (Cost >= 0) {
1327       AM = Backup;
1328       break;
1329     }
1330 
1331     // Ok, the transformation is legal and appears profitable. Go for it.
1332     SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType());
1333     SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
1334     AM.IndexReg = Neg;
1335     AM.Scale = 1;
1336 
1337     // Insert the new nodes into the topological ordering.
1338     insertDAGNode(*CurDAG, N, Zero);
1339     insertDAGNode(*CurDAG, N, Neg);
1340     return false;
1341   }
1342 
1343   case ISD::ADD:
1344     if (!matchAdd(N, AM, Depth))
1345       return false;
1346     break;
1347 
1348   case ISD::OR:
1349     // We want to look through a transform in InstCombine and DAGCombiner that
1350     // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
1351     // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
1352     // An 'lea' can then be used to match the shift (multiply) and add:
1353     // and $1, %esi
1354     // lea (%rsi, %rdi, 8), %rax
1355     if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
1356         !matchAdd(N, AM, Depth))
1357       return false;
1358     break;
1359 
1360   case ISD::AND: {
1361     // Perform some heroic transforms on an and of a constant-count shift
1362     // with a constant to enable use of the scaled offset field.
1363 
1364     // Scale must not be used already.
1365     if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
1366 
1367     SDValue Shift = N.getOperand(0);
1368     if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
1369     SDValue X = Shift.getOperand(0);
1370 
1371     // We only handle up to 64-bit values here as those are what matter for
1372     // addressing mode optimizations.
1373     if (X.getSimpleValueType().getSizeInBits() > 64) break;
1374 
1375     if (!isa<ConstantSDNode>(N.getOperand(1)))
1376       break;
1377     uint64_t Mask = N.getConstantOperandVal(1);
1378 
1379     // Try to fold the mask and shift into an extract and scale.
1380     if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
1381       return false;
1382 
1383     // Try to fold the mask and shift directly into the scale.
1384     if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
1385       return false;
1386 
1387     // Try to swap the mask and shift to place shifts which can be done as
1388     // a scale on the outside of the mask.
1389     if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
1390       return false;
1391     break;
1392   }
1393   }
1394 
1395   return matchAddressBase(N, AM);
1396 }
1397 
1398 /// Helper for MatchAddress. Add the specified node to the
1399 /// specified addressing mode without any further recursion.
matchAddressBase(SDValue N,X86ISelAddressMode & AM)1400 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
1401   // Is the base register already occupied?
1402   if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
1403     // If so, check to see if the scale index register is set.
1404     if (!AM.IndexReg.getNode()) {
1405       AM.IndexReg = N;
1406       AM.Scale = 1;
1407       return false;
1408     }
1409 
1410     // Otherwise, we cannot select it.
1411     return true;
1412   }
1413 
1414   // Default, generate it as a register.
1415   AM.BaseType = X86ISelAddressMode::RegBase;
1416   AM.Base_Reg = N;
1417   return false;
1418 }
1419 
selectVectorAddr(SDNode * Parent,SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)1420 bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
1421                                       SDValue &Scale, SDValue &Index,
1422                                       SDValue &Disp, SDValue &Segment) {
1423 
1424   MaskedGatherScatterSDNode *Mgs = dyn_cast<MaskedGatherScatterSDNode>(Parent);
1425   if (!Mgs)
1426     return false;
1427   X86ISelAddressMode AM;
1428   unsigned AddrSpace = Mgs->getPointerInfo().getAddrSpace();
1429   // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
1430   if (AddrSpace == 256)
1431     AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1432   if (AddrSpace == 257)
1433     AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1434   if (AddrSpace == 258)
1435     AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
1436 
1437   SDLoc DL(N);
1438   Base = Mgs->getBasePtr();
1439   Index = Mgs->getIndex();
1440   unsigned ScalarSize = Mgs->getValue().getValueType().getScalarSizeInBits();
1441   Scale = getI8Imm(ScalarSize/8, DL);
1442 
1443   // If Base is 0, the whole address is in index and the Scale is 1
1444   if (isa<ConstantSDNode>(Base)) {
1445     assert(cast<ConstantSDNode>(Base)->isNullValue() &&
1446            "Unexpected base in gather/scatter");
1447     Scale = getI8Imm(1, DL);
1448     Base = CurDAG->getRegister(0, MVT::i32);
1449   }
1450   if (AM.Segment.getNode())
1451     Segment = AM.Segment;
1452   else
1453     Segment = CurDAG->getRegister(0, MVT::i32);
1454   Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
1455   return true;
1456 }
1457 
1458 /// Returns true if it is able to pattern match an addressing mode.
1459 /// It returns the operands which make up the maximal addressing mode it can
1460 /// match by reference.
1461 ///
1462 /// Parent is the parent node of the addr operand that is being matched.  It
1463 /// is always a load, store, atomic node, or null.  It is only null when
1464 /// checking memory operands for inline asm nodes.
selectAddr(SDNode * Parent,SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)1465 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
1466                                  SDValue &Scale, SDValue &Index,
1467                                  SDValue &Disp, SDValue &Segment) {
1468   X86ISelAddressMode AM;
1469 
1470   if (Parent &&
1471       // This list of opcodes are all the nodes that have an "addr:$ptr" operand
1472       // that are not a MemSDNode, and thus don't have proper addrspace info.
1473       Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
1474       Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
1475       Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
1476       Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
1477       Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
1478     unsigned AddrSpace =
1479       cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
1480     // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
1481     if (AddrSpace == 256)
1482       AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1483     if (AddrSpace == 257)
1484       AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1485     if (AddrSpace == 258)
1486       AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
1487   }
1488 
1489   if (matchAddress(N, AM))
1490     return false;
1491 
1492   MVT VT = N.getSimpleValueType();
1493   if (AM.BaseType == X86ISelAddressMode::RegBase) {
1494     if (!AM.Base_Reg.getNode())
1495       AM.Base_Reg = CurDAG->getRegister(0, VT);
1496   }
1497 
1498   if (!AM.IndexReg.getNode())
1499     AM.IndexReg = CurDAG->getRegister(0, VT);
1500 
1501   getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
1502   return true;
1503 }
1504 
1505 /// Match a scalar SSE load. In particular, we want to match a load whose top
1506 /// elements are either undef or zeros. The load flavor is derived from the
1507 /// type of N, which is either v4f32 or v2f64.
1508 ///
1509 /// We also return:
1510 ///   PatternChainNode: this is the matched node that has a chain input and
1511 ///   output.
selectScalarSSELoad(SDNode * Root,SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment,SDValue & PatternNodeWithChain)1512 bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root,
1513                                           SDValue N, SDValue &Base,
1514                                           SDValue &Scale, SDValue &Index,
1515                                           SDValue &Disp, SDValue &Segment,
1516                                           SDValue &PatternNodeWithChain) {
1517   if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
1518     PatternNodeWithChain = N.getOperand(0);
1519     if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
1520         PatternNodeWithChain.hasOneUse() &&
1521         IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
1522         IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
1523       LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
1524       if (!selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
1525         return false;
1526       return true;
1527     }
1528   }
1529 
1530   // Also handle the case where we explicitly require zeros in the top
1531   // elements.  This is a vector shuffle from the zero vector.
1532   if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
1533       // Check to see if the top elements are all zeros (or bitcast of zeros).
1534       N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
1535       N.getOperand(0).getNode()->hasOneUse() &&
1536       ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
1537       N.getOperand(0).getOperand(0).hasOneUse() &&
1538       IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
1539       IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
1540     // Okay, this is a zero extending load.  Fold it.
1541     LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
1542     if (!selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
1543       return false;
1544     PatternNodeWithChain = SDValue(LD, 0);
1545     return true;
1546   }
1547   return false;
1548 }
1549 
1550 
selectMOV64Imm32(SDValue N,SDValue & Imm)1551 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
1552   if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
1553     uint64_t ImmVal = CN->getZExtValue();
1554     if ((uint32_t)ImmVal != (uint64_t)ImmVal)
1555       return false;
1556 
1557     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
1558     return true;
1559   }
1560 
1561   // In static codegen with small code model, we can get the address of a label
1562   // into a register with 'movl'. TableGen has already made sure we're looking
1563   // at a label of some kind.
1564   assert(N->getOpcode() == X86ISD::Wrapper &&
1565          "Unexpected node type for MOV32ri64");
1566   N = N.getOperand(0);
1567 
1568   if (N->getOpcode() != ISD::TargetConstantPool &&
1569       N->getOpcode() != ISD::TargetJumpTable &&
1570       N->getOpcode() != ISD::TargetGlobalAddress &&
1571       N->getOpcode() != ISD::TargetExternalSymbol &&
1572       N->getOpcode() != ISD::MCSymbol &&
1573       N->getOpcode() != ISD::TargetBlockAddress)
1574     return false;
1575 
1576   Imm = N;
1577   return TM.getCodeModel() == CodeModel::Small;
1578 }
1579 
selectLEA64_32Addr(SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)1580 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
1581                                          SDValue &Scale, SDValue &Index,
1582                                          SDValue &Disp, SDValue &Segment) {
1583   // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
1584   SDLoc DL(N);
1585 
1586   if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
1587     return false;
1588 
1589   RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
1590   if (RN && RN->getReg() == 0)
1591     Base = CurDAG->getRegister(0, MVT::i64);
1592   else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(Base)) {
1593     // Base could already be %rip, particularly in the x32 ABI.
1594     Base = SDValue(CurDAG->getMachineNode(
1595                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
1596                        CurDAG->getTargetConstant(0, DL, MVT::i64),
1597                        Base,
1598                        CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
1599                    0);
1600   }
1601 
1602   RN = dyn_cast<RegisterSDNode>(Index);
1603   if (RN && RN->getReg() == 0)
1604     Index = CurDAG->getRegister(0, MVT::i64);
1605   else {
1606     assert(Index.getValueType() == MVT::i32 &&
1607            "Expect to be extending 32-bit registers for use in LEA");
1608     Index = SDValue(CurDAG->getMachineNode(
1609                         TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
1610                         CurDAG->getTargetConstant(0, DL, MVT::i64),
1611                         Index,
1612                         CurDAG->getTargetConstant(X86::sub_32bit, DL,
1613                                                   MVT::i32)),
1614                     0);
1615   }
1616 
1617   return true;
1618 }
1619 
1620 /// Calls SelectAddr and determines if the maximal addressing
1621 /// mode it matches can be cost effectively emitted as an LEA instruction.
selectLEAAddr(SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)1622 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
1623                                     SDValue &Base, SDValue &Scale,
1624                                     SDValue &Index, SDValue &Disp,
1625                                     SDValue &Segment) {
1626   X86ISelAddressMode AM;
1627 
1628   // Save the DL and VT before calling matchAddress, it can invalidate N.
1629   SDLoc DL(N);
1630   MVT VT = N.getSimpleValueType();
1631 
1632   // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
1633   // segments.
1634   SDValue Copy = AM.Segment;
1635   SDValue T = CurDAG->getRegister(0, MVT::i32);
1636   AM.Segment = T;
1637   if (matchAddress(N, AM))
1638     return false;
1639   assert (T == AM.Segment);
1640   AM.Segment = Copy;
1641 
1642   unsigned Complexity = 0;
1643   if (AM.BaseType == X86ISelAddressMode::RegBase)
1644     if (AM.Base_Reg.getNode())
1645       Complexity = 1;
1646     else
1647       AM.Base_Reg = CurDAG->getRegister(0, VT);
1648   else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1649     Complexity = 4;
1650 
1651   if (AM.IndexReg.getNode())
1652     Complexity++;
1653   else
1654     AM.IndexReg = CurDAG->getRegister(0, VT);
1655 
1656   // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
1657   // a simple shift.
1658   if (AM.Scale > 1)
1659     Complexity++;
1660 
1661   // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1662   // to a LEA. This is determined with some experimentation but is by no means
1663   // optimal (especially for code size consideration). LEA is nice because of
1664   // its three-address nature. Tweak the cost function again when we can run
1665   // convertToThreeAddress() at register allocation time.
1666   if (AM.hasSymbolicDisplacement()) {
1667     // For X86-64, always use LEA to materialize RIP-relative addresses.
1668     if (Subtarget->is64Bit())
1669       Complexity = 4;
1670     else
1671       Complexity += 2;
1672   }
1673 
1674   if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
1675     Complexity++;
1676 
1677   // If it isn't worth using an LEA, reject it.
1678   if (Complexity <= 2)
1679     return false;
1680 
1681   getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment);
1682   return true;
1683 }
1684 
1685 /// This is only run on TargetGlobalTLSAddress nodes.
selectTLSADDRAddr(SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)1686 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
1687                                         SDValue &Scale, SDValue &Index,
1688                                         SDValue &Disp, SDValue &Segment) {
1689   assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
1690   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
1691 
1692   X86ISelAddressMode AM;
1693   AM.GV = GA->getGlobal();
1694   AM.Disp += GA->getOffset();
1695   AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
1696   AM.SymbolFlags = GA->getTargetFlags();
1697 
1698   if (N.getValueType() == MVT::i32) {
1699     AM.Scale = 1;
1700     AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
1701   } else {
1702     AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
1703   }
1704 
1705   getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
1706   return true;
1707 }
1708 
1709 
tryFoldLoad(SDNode * P,SDValue N,SDValue & Base,SDValue & Scale,SDValue & Index,SDValue & Disp,SDValue & Segment)1710 bool X86DAGToDAGISel::tryFoldLoad(SDNode *P, SDValue N,
1711                                   SDValue &Base, SDValue &Scale,
1712                                   SDValue &Index, SDValue &Disp,
1713                                   SDValue &Segment) {
1714   if (!ISD::isNON_EXTLoad(N.getNode()) ||
1715       !IsProfitableToFold(N, P, P) ||
1716       !IsLegalToFold(N, P, P, OptLevel))
1717     return false;
1718 
1719   return selectAddr(N.getNode(),
1720                     N.getOperand(1), Base, Scale, Index, Disp, Segment);
1721 }
1722 
1723 /// Return an SDNode that returns the value of the global base register.
1724 /// Output instructions required to initialize the global base register,
1725 /// if necessary.
getGlobalBaseReg()1726 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1727   unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
1728   auto &DL = MF->getDataLayout();
1729   return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
1730 }
1731 
1732 /// Test whether the given X86ISD::CMP node has any uses which require the SF
1733 /// or OF bits to be accurate.
hasNoSignedComparisonUses(SDNode * N)1734 static bool hasNoSignedComparisonUses(SDNode *N) {
1735   // Examine each user of the node.
1736   for (SDNode::use_iterator UI = N->use_begin(),
1737          UE = N->use_end(); UI != UE; ++UI) {
1738     // Only examine CopyToReg uses.
1739     if (UI->getOpcode() != ISD::CopyToReg)
1740       return false;
1741     // Only examine CopyToReg uses that copy to EFLAGS.
1742     if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
1743           X86::EFLAGS)
1744       return false;
1745     // Examine each user of the CopyToReg use.
1746     for (SDNode::use_iterator FlagUI = UI->use_begin(),
1747            FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
1748       // Only examine the Flag result.
1749       if (FlagUI.getUse().getResNo() != 1) continue;
1750       // Anything unusual: assume conservatively.
1751       if (!FlagUI->isMachineOpcode()) return false;
1752       // Examine the opcode of the user.
1753       switch (FlagUI->getMachineOpcode()) {
1754       // These comparisons don't treat the most significant bit specially.
1755       case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1756       case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1757       case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1758       case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
1759       case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
1760       case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
1761       case X86::CMOVA16rr: case X86::CMOVA16rm:
1762       case X86::CMOVA32rr: case X86::CMOVA32rm:
1763       case X86::CMOVA64rr: case X86::CMOVA64rm:
1764       case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1765       case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1766       case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1767       case X86::CMOVB16rr: case X86::CMOVB16rm:
1768       case X86::CMOVB32rr: case X86::CMOVB32rm:
1769       case X86::CMOVB64rr: case X86::CMOVB64rm:
1770       case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1771       case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1772       case X86::CMOVBE64rr: case X86::CMOVBE64rm:
1773       case X86::CMOVE16rr: case X86::CMOVE16rm:
1774       case X86::CMOVE32rr: case X86::CMOVE32rm:
1775       case X86::CMOVE64rr: case X86::CMOVE64rm:
1776       case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1777       case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1778       case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1779       case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1780       case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1781       case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1782       case X86::CMOVP16rr: case X86::CMOVP16rm:
1783       case X86::CMOVP32rr: case X86::CMOVP32rm:
1784       case X86::CMOVP64rr: case X86::CMOVP64rm:
1785         continue;
1786       // Anything else: assume conservatively.
1787       default: return false;
1788       }
1789     }
1790   }
1791   return true;
1792 }
1793 
1794 /// Check whether or not the chain ending in StoreNode is suitable for doing
1795 /// the {load; increment or decrement; store} to modify transformation.
isLoadIncOrDecStore(StoreSDNode * StoreNode,unsigned Opc,SDValue StoredVal,SelectionDAG * CurDAG,LoadSDNode * & LoadNode,SDValue & InputChain)1796 static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
1797                                 SDValue StoredVal, SelectionDAG *CurDAG,
1798                                 LoadSDNode* &LoadNode, SDValue &InputChain) {
1799 
1800   // is the value stored the result of a DEC or INC?
1801   if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
1802 
1803   // is the stored value result 0 of the load?
1804   if (StoredVal.getResNo() != 0) return false;
1805 
1806   // are there other uses of the loaded value than the inc or dec?
1807   if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
1808 
1809   // is the store non-extending and non-indexed?
1810   if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
1811     return false;
1812 
1813   SDValue Load = StoredVal->getOperand(0);
1814   // Is the stored value a non-extending and non-indexed load?
1815   if (!ISD::isNormalLoad(Load.getNode())) return false;
1816 
1817   // Return LoadNode by reference.
1818   LoadNode = cast<LoadSDNode>(Load);
1819   // is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
1820   EVT LdVT = LoadNode->getMemoryVT();
1821   if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
1822       LdVT != MVT::i8)
1823     return false;
1824 
1825   // Is store the only read of the loaded value?
1826   if (!Load.hasOneUse())
1827     return false;
1828 
1829   // Is the address of the store the same as the load?
1830   if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1831       LoadNode->getOffset() != StoreNode->getOffset())
1832     return false;
1833 
1834   // Check if the chain is produced by the load or is a TokenFactor with
1835   // the load output chain as an operand. Return InputChain by reference.
1836   SDValue Chain = StoreNode->getChain();
1837 
1838   bool ChainCheck = false;
1839   if (Chain == Load.getValue(1)) {
1840     ChainCheck = true;
1841     InputChain = LoadNode->getChain();
1842   } else if (Chain.getOpcode() == ISD::TokenFactor) {
1843     SmallVector<SDValue, 4> ChainOps;
1844     for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1845       SDValue Op = Chain.getOperand(i);
1846       if (Op == Load.getValue(1)) {
1847         ChainCheck = true;
1848         continue;
1849       }
1850 
1851       // Make sure using Op as part of the chain would not cause a cycle here.
1852       // In theory, we could check whether the chain node is a predecessor of
1853       // the load. But that can be very expensive. Instead visit the uses and
1854       // make sure they all have smaller node id than the load.
1855       int LoadId = LoadNode->getNodeId();
1856       for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
1857              UE = UI->use_end(); UI != UE; ++UI) {
1858         if (UI.getUse().getResNo() != 0)
1859           continue;
1860         if (UI->getNodeId() > LoadId)
1861           return false;
1862       }
1863 
1864       ChainOps.push_back(Op);
1865     }
1866 
1867     if (ChainCheck)
1868       // Make a new TokenFactor with all the other input chains except
1869       // for the load.
1870       InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
1871                                    MVT::Other, ChainOps);
1872   }
1873   if (!ChainCheck)
1874     return false;
1875 
1876   return true;
1877 }
1878 
1879 /// Get the appropriate X86 opcode for an in-memory increment or decrement.
1880 /// Opc should be X86ISD::DEC or X86ISD::INC.
getFusedLdStOpcode(EVT & LdVT,unsigned Opc)1881 static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
1882   if (Opc == X86ISD::DEC) {
1883     if (LdVT == MVT::i64) return X86::DEC64m;
1884     if (LdVT == MVT::i32) return X86::DEC32m;
1885     if (LdVT == MVT::i16) return X86::DEC16m;
1886     if (LdVT == MVT::i8)  return X86::DEC8m;
1887   } else {
1888     assert(Opc == X86ISD::INC && "unrecognized opcode");
1889     if (LdVT == MVT::i64) return X86::INC64m;
1890     if (LdVT == MVT::i32) return X86::INC32m;
1891     if (LdVT == MVT::i16) return X86::INC16m;
1892     if (LdVT == MVT::i8)  return X86::INC8m;
1893   }
1894   llvm_unreachable("unrecognized size for LdVT");
1895 }
1896 
1897 /// Customized ISel for GATHER operations.
tryGather(SDNode * Node,unsigned Opc)1898 bool X86DAGToDAGISel::tryGather(SDNode *Node, unsigned Opc) {
1899   // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
1900   SDValue Chain = Node->getOperand(0);
1901   SDValue VSrc = Node->getOperand(2);
1902   SDValue Base = Node->getOperand(3);
1903   SDValue VIdx = Node->getOperand(4);
1904   SDValue VMask = Node->getOperand(5);
1905   ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
1906   if (!Scale)
1907     return false;
1908 
1909   SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
1910                                    MVT::Other);
1911 
1912   SDLoc DL(Node);
1913 
1914   // Memory Operands: Base, Scale, Index, Disp, Segment
1915   SDValue Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
1916   SDValue Segment = CurDAG->getRegister(0, MVT::i32);
1917   const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue(), DL), VIdx,
1918                           Disp, Segment, VMask, Chain};
1919   SDNode *ResNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
1920   // Node has 2 outputs: VDst and MVT::Other.
1921   // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
1922   // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
1923   // of ResNode.
1924   ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
1925   ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
1926   CurDAG->RemoveDeadNode(Node);
1927   return true;
1928 }
1929 
Select(SDNode * Node)1930 void X86DAGToDAGISel::Select(SDNode *Node) {
1931   MVT NVT = Node->getSimpleValueType(0);
1932   unsigned Opc, MOpc;
1933   unsigned Opcode = Node->getOpcode();
1934   SDLoc dl(Node);
1935 
1936   DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
1937 
1938   if (Node->isMachineOpcode()) {
1939     DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
1940     Node->setNodeId(-1);
1941     return;   // Already selected.
1942   }
1943 
1944   switch (Opcode) {
1945   default: break;
1946   case ISD::BRIND: {
1947     if (Subtarget->isTargetNaCl())
1948       // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We
1949       // leave the instruction alone.
1950       break;
1951     if (Subtarget->isTarget64BitILP32()) {
1952       // Converts a 32-bit register to a 64-bit, zero-extended version of
1953       // it. This is needed because x86-64 can do many things, but jmp %r32
1954       // ain't one of them.
1955       const SDValue &Target = Node->getOperand(1);
1956       assert(Target.getSimpleValueType() == llvm::MVT::i32);
1957       SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
1958       SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
1959                                       Node->getOperand(0), ZextTarget);
1960       ReplaceNode(Node, Brind.getNode());
1961       SelectCode(ZextTarget.getNode());
1962       SelectCode(Brind.getNode());
1963       return;
1964     }
1965     break;
1966   }
1967   case ISD::INTRINSIC_W_CHAIN: {
1968     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1969     switch (IntNo) {
1970     default: break;
1971     case Intrinsic::x86_avx2_gather_d_pd:
1972     case Intrinsic::x86_avx2_gather_d_pd_256:
1973     case Intrinsic::x86_avx2_gather_q_pd:
1974     case Intrinsic::x86_avx2_gather_q_pd_256:
1975     case Intrinsic::x86_avx2_gather_d_ps:
1976     case Intrinsic::x86_avx2_gather_d_ps_256:
1977     case Intrinsic::x86_avx2_gather_q_ps:
1978     case Intrinsic::x86_avx2_gather_q_ps_256:
1979     case Intrinsic::x86_avx2_gather_d_q:
1980     case Intrinsic::x86_avx2_gather_d_q_256:
1981     case Intrinsic::x86_avx2_gather_q_q:
1982     case Intrinsic::x86_avx2_gather_q_q_256:
1983     case Intrinsic::x86_avx2_gather_d_d:
1984     case Intrinsic::x86_avx2_gather_d_d_256:
1985     case Intrinsic::x86_avx2_gather_q_d:
1986     case Intrinsic::x86_avx2_gather_q_d_256: {
1987       if (!Subtarget->hasAVX2())
1988         break;
1989       unsigned Opc;
1990       switch (IntNo) {
1991       default: llvm_unreachable("Impossible intrinsic");
1992       case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
1993       case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
1994       case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
1995       case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
1996       case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
1997       case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
1998       case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
1999       case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
2000       case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
2001       case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
2002       case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
2003       case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
2004       case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
2005       case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
2006       case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
2007       case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
2008       }
2009       if (tryGather(Node, Opc))
2010         return;
2011       break;
2012     }
2013     }
2014     break;
2015   }
2016   case X86ISD::GlobalBaseReg:
2017     ReplaceNode(Node, getGlobalBaseReg());
2018     return;
2019 
2020   case X86ISD::SHRUNKBLEND: {
2021     // SHRUNKBLEND selects like a regular VSELECT.
2022     SDValue VSelect = CurDAG->getNode(
2023         ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
2024         Node->getOperand(1), Node->getOperand(2));
2025     ReplaceUses(SDValue(Node, 0), VSelect);
2026     SelectCode(VSelect.getNode());
2027     // We already called ReplaceUses.
2028     return;
2029   }
2030 
2031   case ISD::AND:
2032   case ISD::OR:
2033   case ISD::XOR: {
2034     // For operations of the form (x << C1) op C2, check if we can use a smaller
2035     // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
2036     SDValue N0 = Node->getOperand(0);
2037     SDValue N1 = Node->getOperand(1);
2038 
2039     if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
2040       break;
2041 
2042     // i8 is unshrinkable, i16 should be promoted to i32.
2043     if (NVT != MVT::i32 && NVT != MVT::i64)
2044       break;
2045 
2046     ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2047     ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2048     if (!Cst || !ShlCst)
2049       break;
2050 
2051     int64_t Val = Cst->getSExtValue();
2052     uint64_t ShlVal = ShlCst->getZExtValue();
2053 
2054     // Make sure that we don't change the operation by removing bits.
2055     // This only matters for OR and XOR, AND is unaffected.
2056     uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
2057     if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
2058       break;
2059 
2060     unsigned ShlOp, AddOp, Op;
2061     MVT CstVT = NVT;
2062 
2063     // Check the minimum bitwidth for the new constant.
2064     // TODO: AND32ri is the same as AND64ri32 with zext imm.
2065     // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2066     // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
2067     if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
2068       CstVT = MVT::i8;
2069     else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
2070       CstVT = MVT::i32;
2071 
2072     // Bail if there is no smaller encoding.
2073     if (NVT == CstVT)
2074       break;
2075 
2076     switch (NVT.SimpleTy) {
2077     default: llvm_unreachable("Unsupported VT!");
2078     case MVT::i32:
2079       assert(CstVT == MVT::i8);
2080       ShlOp = X86::SHL32ri;
2081       AddOp = X86::ADD32rr;
2082 
2083       switch (Opcode) {
2084       default: llvm_unreachable("Impossible opcode");
2085       case ISD::AND: Op = X86::AND32ri8; break;
2086       case ISD::OR:  Op =  X86::OR32ri8; break;
2087       case ISD::XOR: Op = X86::XOR32ri8; break;
2088       }
2089       break;
2090     case MVT::i64:
2091       assert(CstVT == MVT::i8 || CstVT == MVT::i32);
2092       ShlOp = X86::SHL64ri;
2093       AddOp = X86::ADD64rr;
2094 
2095       switch (Opcode) {
2096       default: llvm_unreachable("Impossible opcode");
2097       case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
2098       case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
2099       case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
2100       }
2101       break;
2102     }
2103 
2104     // Emit the smaller op and the shift.
2105     SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
2106     SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
2107     if (ShlVal == 1)
2108       CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
2109                            SDValue(New, 0));
2110     else
2111       CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
2112                            getI8Imm(ShlVal, dl));
2113     return;
2114   }
2115   case X86ISD::UMUL8:
2116   case X86ISD::SMUL8: {
2117     SDValue N0 = Node->getOperand(0);
2118     SDValue N1 = Node->getOperand(1);
2119 
2120     Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);
2121 
2122     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
2123                                           N0, SDValue()).getValue(1);
2124 
2125     SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
2126     SDValue Ops[] = {N1, InFlag};
2127     SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2128 
2129     ReplaceNode(Node, CNode);
2130     return;
2131   }
2132 
2133   case X86ISD::UMUL: {
2134     SDValue N0 = Node->getOperand(0);
2135     SDValue N1 = Node->getOperand(1);
2136 
2137     unsigned LoReg;
2138     switch (NVT.SimpleTy) {
2139     default: llvm_unreachable("Unsupported VT!");
2140     case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r; break;
2141     case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
2142     case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
2143     case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
2144     }
2145 
2146     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2147                                           N0, SDValue()).getValue(1);
2148 
2149     SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
2150     SDValue Ops[] = {N1, InFlag};
2151     SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2152 
2153     ReplaceNode(Node, CNode);
2154     return;
2155   }
2156 
2157   case ISD::SMUL_LOHI:
2158   case ISD::UMUL_LOHI: {
2159     SDValue N0 = Node->getOperand(0);
2160     SDValue N1 = Node->getOperand(1);
2161 
2162     bool isSigned = Opcode == ISD::SMUL_LOHI;
2163     bool hasBMI2 = Subtarget->hasBMI2();
2164     if (!isSigned) {
2165       switch (NVT.SimpleTy) {
2166       default: llvm_unreachable("Unsupported VT!");
2167       case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
2168       case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
2169       case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
2170                      MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
2171       case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
2172                      MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
2173       }
2174     } else {
2175       switch (NVT.SimpleTy) {
2176       default: llvm_unreachable("Unsupported VT!");
2177       case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
2178       case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
2179       case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
2180       case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
2181       }
2182     }
2183 
2184     unsigned SrcReg, LoReg, HiReg;
2185     switch (Opc) {
2186     default: llvm_unreachable("Unknown MUL opcode!");
2187     case X86::IMUL8r:
2188     case X86::MUL8r:
2189       SrcReg = LoReg = X86::AL; HiReg = X86::AH;
2190       break;
2191     case X86::IMUL16r:
2192     case X86::MUL16r:
2193       SrcReg = LoReg = X86::AX; HiReg = X86::DX;
2194       break;
2195     case X86::IMUL32r:
2196     case X86::MUL32r:
2197       SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
2198       break;
2199     case X86::IMUL64r:
2200     case X86::MUL64r:
2201       SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
2202       break;
2203     case X86::MULX32rr:
2204       SrcReg = X86::EDX; LoReg = HiReg = 0;
2205       break;
2206     case X86::MULX64rr:
2207       SrcReg = X86::RDX; LoReg = HiReg = 0;
2208       break;
2209     }
2210 
2211     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2212     bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2213     // Multiply is commmutative.
2214     if (!foldedLoad) {
2215       foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2216       if (foldedLoad)
2217         std::swap(N0, N1);
2218     }
2219 
2220     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
2221                                           N0, SDValue()).getValue(1);
2222     SDValue ResHi, ResLo;
2223 
2224     if (foldedLoad) {
2225       SDValue Chain;
2226       MachineSDNode *CNode = nullptr;
2227       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2228                         InFlag };
2229       if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
2230         SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
2231         CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2232         ResHi = SDValue(CNode, 0);
2233         ResLo = SDValue(CNode, 1);
2234         Chain = SDValue(CNode, 2);
2235         InFlag = SDValue(CNode, 3);
2236       } else {
2237         SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
2238         CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2239         Chain = SDValue(CNode, 0);
2240         InFlag = SDValue(CNode, 1);
2241       }
2242 
2243       // Update the chain.
2244       ReplaceUses(N1.getValue(1), Chain);
2245       // Record the mem-refs
2246       LoadSDNode *LoadNode = cast<LoadSDNode>(N1);
2247       if (LoadNode) {
2248         MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2249         MemOp[0] = LoadNode->getMemOperand();
2250         CNode->setMemRefs(MemOp, MemOp + 1);
2251       }
2252     } else {
2253       SDValue Ops[] = { N1, InFlag };
2254       if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
2255         SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
2256         SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2257         ResHi = SDValue(CNode, 0);
2258         ResLo = SDValue(CNode, 1);
2259         InFlag = SDValue(CNode, 2);
2260       } else {
2261         SDVTList VTs = CurDAG->getVTList(MVT::Glue);
2262         SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2263         InFlag = SDValue(CNode, 0);
2264       }
2265     }
2266 
2267     // Prevent use of AH in a REX instruction by referencing AX instead.
2268     if (HiReg == X86::AH && Subtarget->is64Bit() &&
2269         !SDValue(Node, 1).use_empty()) {
2270       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2271                                               X86::AX, MVT::i16, InFlag);
2272       InFlag = Result.getValue(2);
2273       // Get the low part if needed. Don't use getCopyFromReg for aliasing
2274       // registers.
2275       if (!SDValue(Node, 0).use_empty())
2276         ReplaceUses(SDValue(Node, 1),
2277           CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2278 
2279       // Shift AX down 8 bits.
2280       Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
2281                                               Result,
2282                                      CurDAG->getTargetConstant(8, dl, MVT::i8)),
2283                        0);
2284       // Then truncate it down to i8.
2285       ReplaceUses(SDValue(Node, 1),
2286         CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2287     }
2288     // Copy the low half of the result, if it is needed.
2289     if (!SDValue(Node, 0).use_empty()) {
2290       if (!ResLo.getNode()) {
2291         assert(LoReg && "Register for low half is not defined!");
2292         ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
2293                                        InFlag);
2294         InFlag = ResLo.getValue(2);
2295       }
2296       ReplaceUses(SDValue(Node, 0), ResLo);
2297       DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
2298     }
2299     // Copy the high half of the result, if it is needed.
2300     if (!SDValue(Node, 1).use_empty()) {
2301       if (!ResHi.getNode()) {
2302         assert(HiReg && "Register for high half is not defined!");
2303         ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
2304                                        InFlag);
2305         InFlag = ResHi.getValue(2);
2306       }
2307       ReplaceUses(SDValue(Node, 1), ResHi);
2308       DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
2309     }
2310 
2311     return;
2312   }
2313 
2314   case ISD::SDIVREM:
2315   case ISD::UDIVREM:
2316   case X86ISD::SDIVREM8_SEXT_HREG:
2317   case X86ISD::UDIVREM8_ZEXT_HREG: {
2318     SDValue N0 = Node->getOperand(0);
2319     SDValue N1 = Node->getOperand(1);
2320 
2321     bool isSigned = (Opcode == ISD::SDIVREM ||
2322                      Opcode == X86ISD::SDIVREM8_SEXT_HREG);
2323     if (!isSigned) {
2324       switch (NVT.SimpleTy) {
2325       default: llvm_unreachable("Unsupported VT!");
2326       case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
2327       case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
2328       case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
2329       case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
2330       }
2331     } else {
2332       switch (NVT.SimpleTy) {
2333       default: llvm_unreachable("Unsupported VT!");
2334       case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
2335       case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
2336       case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
2337       case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
2338       }
2339     }
2340 
2341     unsigned LoReg, HiReg, ClrReg;
2342     unsigned SExtOpcode;
2343     switch (NVT.SimpleTy) {
2344     default: llvm_unreachable("Unsupported VT!");
2345     case MVT::i8:
2346       LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
2347       SExtOpcode = X86::CBW;
2348       break;
2349     case MVT::i16:
2350       LoReg = X86::AX;  HiReg = X86::DX;
2351       ClrReg = X86::DX;
2352       SExtOpcode = X86::CWD;
2353       break;
2354     case MVT::i32:
2355       LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
2356       SExtOpcode = X86::CDQ;
2357       break;
2358     case MVT::i64:
2359       LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
2360       SExtOpcode = X86::CQO;
2361       break;
2362     }
2363 
2364     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2365     bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2366     bool signBitIsZero = CurDAG->SignBitIsZero(N0);
2367 
2368     SDValue InFlag;
2369     if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
2370       // Special case for div8, just use a move with zero extension to AX to
2371       // clear the upper 8 bits (AH).
2372       SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
2373       if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
2374         SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
2375         Move =
2376           SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
2377                                          MVT::Other, Ops), 0);
2378         Chain = Move.getValue(1);
2379         ReplaceUses(N0.getValue(1), Chain);
2380       } else {
2381         Move =
2382           SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
2383         Chain = CurDAG->getEntryNode();
2384       }
2385       Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
2386       InFlag = Chain.getValue(1);
2387     } else {
2388       InFlag =
2389         CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
2390                              LoReg, N0, SDValue()).getValue(1);
2391       if (isSigned && !signBitIsZero) {
2392         // Sign extend the low part into the high part.
2393         InFlag =
2394           SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
2395       } else {
2396         // Zero out the high part, effectively zero extending the input.
2397         SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
2398         switch (NVT.SimpleTy) {
2399         case MVT::i16:
2400           ClrNode =
2401               SDValue(CurDAG->getMachineNode(
2402                           TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
2403                           CurDAG->getTargetConstant(X86::sub_16bit, dl,
2404                                                     MVT::i32)),
2405                       0);
2406           break;
2407         case MVT::i32:
2408           break;
2409         case MVT::i64:
2410           ClrNode =
2411               SDValue(CurDAG->getMachineNode(
2412                           TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
2413                           CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
2414                           CurDAG->getTargetConstant(X86::sub_32bit, dl,
2415                                                     MVT::i32)),
2416                       0);
2417           break;
2418         default:
2419           llvm_unreachable("Unexpected division source");
2420         }
2421 
2422         InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
2423                                       ClrNode, InFlag).getValue(1);
2424       }
2425     }
2426 
2427     if (foldedLoad) {
2428       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2429                         InFlag };
2430       SDNode *CNode =
2431         CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
2432       InFlag = SDValue(CNode, 1);
2433       // Update the chain.
2434       ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
2435     } else {
2436       InFlag =
2437         SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
2438     }
2439 
2440     // Prevent use of AH in a REX instruction by explicitly copying it to
2441     // an ABCD_L register.
2442     //
2443     // The current assumption of the register allocator is that isel
2444     // won't generate explicit references to the GR8_ABCD_H registers. If
2445     // the allocator and/or the backend get enhanced to be more robust in
2446     // that regard, this can be, and should be, removed.
2447     if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
2448       SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
2449       unsigned AHExtOpcode =
2450           isSigned ? X86::MOVSX32_NOREXrr8 : X86::MOVZX32_NOREXrr8;
2451 
2452       SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
2453                                              MVT::Glue, AHCopy, InFlag);
2454       SDValue Result(RNode, 0);
2455       InFlag = SDValue(RNode, 1);
2456 
2457       if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
2458           Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
2459         if (Node->getValueType(1) == MVT::i64) {
2460           // It's not possible to directly movsx AH to a 64bit register, because
2461           // the latter needs the REX prefix, but the former can't have it.
2462           assert(Opcode != X86ISD::SDIVREM8_SEXT_HREG &&
2463                  "Unexpected i64 sext of h-register");
2464           Result =
2465               SDValue(CurDAG->getMachineNode(
2466                           TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
2467                           CurDAG->getTargetConstant(0, dl, MVT::i64), Result,
2468                           CurDAG->getTargetConstant(X86::sub_32bit, dl,
2469                                                     MVT::i32)),
2470                       0);
2471         }
2472       } else {
2473         Result =
2474             CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
2475       }
2476       ReplaceUses(SDValue(Node, 1), Result);
2477       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2478     }
2479     // Copy the division (low) result, if it is needed.
2480     if (!SDValue(Node, 0).use_empty()) {
2481       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2482                                                 LoReg, NVT, InFlag);
2483       InFlag = Result.getValue(2);
2484       ReplaceUses(SDValue(Node, 0), Result);
2485       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2486     }
2487     // Copy the remainder (high) result, if it is needed.
2488     if (!SDValue(Node, 1).use_empty()) {
2489       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2490                                               HiReg, NVT, InFlag);
2491       InFlag = Result.getValue(2);
2492       ReplaceUses(SDValue(Node, 1), Result);
2493       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2494     }
2495     return;
2496   }
2497 
2498   case X86ISD::CMP:
2499   case X86ISD::SUB: {
2500     // Sometimes a SUB is used to perform comparison.
2501     if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
2502       // This node is not a CMP.
2503       break;
2504     SDValue N0 = Node->getOperand(0);
2505     SDValue N1 = Node->getOperand(1);
2506 
2507     if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
2508         hasNoSignedComparisonUses(Node))
2509       N0 = N0.getOperand(0);
2510 
2511     // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
2512     // use a smaller encoding.
2513     // Look past the truncate if CMP is the only use of it.
2514     if ((N0.getNode()->getOpcode() == ISD::AND ||
2515          (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
2516         N0.getNode()->hasOneUse() &&
2517         N0.getValueType() != MVT::i8 &&
2518         X86::isZeroNode(N1)) {
2519       ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
2520       if (!C) break;
2521 
2522       // For example, convert "testl %eax, $8" to "testb %al, $8"
2523       if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
2524           (!(C->getZExtValue() & 0x80) ||
2525            hasNoSignedComparisonUses(Node))) {
2526         SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl, MVT::i8);
2527         SDValue Reg = N0.getNode()->getOperand(0);
2528 
2529         // On x86-32, only the ABCD registers have 8-bit subregisters.
2530         if (!Subtarget->is64Bit()) {
2531           const TargetRegisterClass *TRC;
2532           switch (N0.getSimpleValueType().SimpleTy) {
2533           case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2534           case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2535           default: llvm_unreachable("Unsupported TEST operand type!");
2536           }
2537           SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
2538           Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2539                                                Reg.getValueType(), Reg, RC), 0);
2540         }
2541 
2542         // Extract the l-register.
2543         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
2544                                                         MVT::i8, Reg);
2545 
2546         // Emit a testb.
2547         SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
2548                                                  Subreg, Imm);
2549         // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2550         // one, do not call ReplaceAllUsesWith.
2551         ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2552                     SDValue(NewNode, 0));
2553         return;
2554       }
2555 
2556       // For example, "testl %eax, $2048" to "testb %ah, $8".
2557       if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
2558           (!(C->getZExtValue() & 0x8000) ||
2559            hasNoSignedComparisonUses(Node))) {
2560         // Shift the immediate right by 8 bits.
2561         SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
2562                                                        dl, MVT::i8);
2563         SDValue Reg = N0.getNode()->getOperand(0);
2564 
2565         // Put the value in an ABCD register.
2566         const TargetRegisterClass *TRC;
2567         switch (N0.getSimpleValueType().SimpleTy) {
2568         case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
2569         case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2570         case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2571         default: llvm_unreachable("Unsupported TEST operand type!");
2572         }
2573         SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
2574         Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2575                                              Reg.getValueType(), Reg, RC), 0);
2576 
2577         // Extract the h-register.
2578         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
2579                                                         MVT::i8, Reg);
2580 
2581         // Emit a testb.  The EXTRACT_SUBREG becomes a COPY that can only
2582         // target GR8_NOREX registers, so make sure the register class is
2583         // forced.
2584         SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
2585                                                  MVT::i32, Subreg, ShiftedImm);
2586         // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2587         // one, do not call ReplaceAllUsesWith.
2588         ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2589                     SDValue(NewNode, 0));
2590         return;
2591       }
2592 
2593       // For example, "testl %eax, $32776" to "testw %ax, $32776".
2594       if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
2595           N0.getValueType() != MVT::i16 &&
2596           (!(C->getZExtValue() & 0x8000) ||
2597            hasNoSignedComparisonUses(Node))) {
2598         SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
2599                                                 MVT::i16);
2600         SDValue Reg = N0.getNode()->getOperand(0);
2601 
2602         // Extract the 16-bit subregister.
2603         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
2604                                                         MVT::i16, Reg);
2605 
2606         // Emit a testw.
2607         SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
2608                                                  Subreg, Imm);
2609         // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2610         // one, do not call ReplaceAllUsesWith.
2611         ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2612                     SDValue(NewNode, 0));
2613         return;
2614       }
2615 
2616       // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
2617       if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
2618           N0.getValueType() == MVT::i64 &&
2619           (!(C->getZExtValue() & 0x80000000) ||
2620            hasNoSignedComparisonUses(Node))) {
2621         SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
2622                                                 MVT::i32);
2623         SDValue Reg = N0.getNode()->getOperand(0);
2624 
2625         // Extract the 32-bit subregister.
2626         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
2627                                                         MVT::i32, Reg);
2628 
2629         // Emit a testl.
2630         SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
2631                                                  Subreg, Imm);
2632         // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2633         // one, do not call ReplaceAllUsesWith.
2634         ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2635                     SDValue(NewNode, 0));
2636         return;
2637       }
2638     }
2639     break;
2640   }
2641   case ISD::STORE: {
2642     // Change a chain of {load; incr or dec; store} of the same value into
2643     // a simple increment or decrement through memory of that value, if the
2644     // uses of the modified value and its address are suitable.
2645     // The DEC64m tablegen pattern is currently not able to match the case where
2646     // the EFLAGS on the original DEC are used. (This also applies to
2647     // {INC,DEC}X{64,32,16,8}.)
2648     // We'll need to improve tablegen to allow flags to be transferred from a
2649     // node in the pattern to the result node.  probably with a new keyword
2650     // for example, we have this
2651     // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2652     //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2653     //   (implicit EFLAGS)]>;
2654     // but maybe need something like this
2655     // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2656     //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2657     //   (transferrable EFLAGS)]>;
2658 
2659     StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2660     SDValue StoredVal = StoreNode->getOperand(1);
2661     unsigned Opc = StoredVal->getOpcode();
2662 
2663     LoadSDNode *LoadNode = nullptr;
2664     SDValue InputChain;
2665     if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
2666                              LoadNode, InputChain))
2667       break;
2668 
2669     SDValue Base, Scale, Index, Disp, Segment;
2670     if (!selectAddr(LoadNode, LoadNode->getBasePtr(),
2671                     Base, Scale, Index, Disp, Segment))
2672       break;
2673 
2674     MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2675     MemOp[0] = StoreNode->getMemOperand();
2676     MemOp[1] = LoadNode->getMemOperand();
2677     const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
2678     EVT LdVT = LoadNode->getMemoryVT();
2679     unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
2680     MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
2681                                                    SDLoc(Node),
2682                                                    MVT::i32, MVT::Other, Ops);
2683     Result->setMemRefs(MemOp, MemOp + 2);
2684 
2685     ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
2686     ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
2687     CurDAG->RemoveDeadNode(Node);
2688     return;
2689   }
2690   }
2691 
2692   SelectCode(Node);
2693 }
2694 
2695 bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue & Op,unsigned ConstraintID,std::vector<SDValue> & OutOps)2696 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
2697                              std::vector<SDValue> &OutOps) {
2698   SDValue Op0, Op1, Op2, Op3, Op4;
2699   switch (ConstraintID) {
2700   default:
2701     llvm_unreachable("Unexpected asm memory constraint");
2702   case InlineAsm::Constraint_i:
2703     // FIXME: It seems strange that 'i' is needed here since it's supposed to
2704     //        be an immediate and not a memory constraint.
2705     // Fallthrough.
2706   case InlineAsm::Constraint_o: // offsetable        ??
2707   case InlineAsm::Constraint_v: // not offsetable    ??
2708   case InlineAsm::Constraint_m: // memory
2709   case InlineAsm::Constraint_X:
2710     if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
2711       return true;
2712     break;
2713   }
2714 
2715   OutOps.push_back(Op0);
2716   OutOps.push_back(Op1);
2717   OutOps.push_back(Op2);
2718   OutOps.push_back(Op3);
2719   OutOps.push_back(Op4);
2720   return false;
2721 }
2722 
2723 /// This pass converts a legalized DAG into a X86-specific DAG,
2724 /// ready for instruction scheduling.
createX86ISelDag(X86TargetMachine & TM,CodeGenOpt::Level OptLevel)2725 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
2726                                      CodeGenOpt::Level OptLevel) {
2727   return new X86DAGToDAGISel(TM, OptLevel);
2728 }
2729