//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
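  // For example, an all-ones v8i8 splat whose elements were promoted to i32
  // by type legalization still passes this check as long as the low 8 bits of
  // each promoted constant are all ones.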
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}


/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF)
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    SDValue Zero = N->getOperand(i);
    unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantFPSDNode or undef.
bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
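/// For example, getSetCCSwappedOperands(ISD::SETLT) is ISD::SETGT and
/// getSetCCSwappedOperands(ISD::SETULE) is ISD::SETUGE; SETEQ and SETNE are
/// unchanged.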
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
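/// For example, the inverse of SETLT is SETGE; for floating-point predicates
/// the unordered bit is also flipped, so the inverse of SETOLT is SETUGE.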
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}


/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison.  Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)).  This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
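/// For example, combining SETLT and SETGT yields SETNE, and combining SETLT
/// and SETEQ yields SETLE.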
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)).  This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
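/// For example, combining SETLE and SETGE yields SETEQ, while mixing a signed
/// comparison with an unsigned one yields SETCC_INVALID.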
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw,
                                  bool exact) {
  ID.AddBoolean(nuw);
  ID.AddBoolean(nsw);
  ID.AddBoolean(exact);
}

/// AddBinaryNodeIDCustom - Add the BinarySDNode's special info (the nuw, nsw,
/// and exact flags) to the NodeID data.
static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode,
                                  bool nuw, bool nsw, bool exact) {
  if (isBinOpWithFlags(Opcode))
    AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::MUL:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::SHL: {
    const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
    AddBinaryNodeIDCustom(ID, N->getOpcode(), BinNode->hasNoUnsignedWrap(),
                          BinNode->hasNoSignedWrap(), BinNode->isExact());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
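/// The encoding packs the fields as follows: bits 0-1 hold the
/// extension/truncation type, bits 2-4 the indexing mode, bit 5 the volatile
/// flag, bit 6 the non-temporal flag, and bit 7 the invariant flag.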
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
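/// Glue results are never CSE'd because glue ties a node to its immediate
/// neighbours in the scheduling order, so two structurally identical
/// glue-producing nodes are not interchangeable.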
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list.  This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
             (EltVT.isInteger() && I->getValueType().isInteger() &&
              EltVT.bitsLE(I->getValueType()))) &&
            "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it.  This is useful when we're about to delete or repurpose
/// the node.  We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one.  This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}


/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TLI->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
      UpdateListeners(nullptr) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf) {
  MF = &mf;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
                                            SDVTList VTs, SDValue N1,
                                            SDValue N2, bool nuw, bool nsw,
                                            bool exact) {
  if (isBinOpWithFlags(Opcode)) {
    BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
        Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
    FN->setHasNoUnsignedWrap(nuw);
    FN->setHasNoSignedWrap(nsw);
    FN->setIsExact(exact);

    return FN;
  }

  BinarySDNode *N = new (NodeAllocator)
      BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
    case TargetLowering::ZeroOrOneBooleanContent:
    case TargetLowering::UndefinedBooleanContent:
      TrueValue = getConstant(1, VT);
      break;
    case TargetLowering::ZeroOrNegativeOneBooleanContent:
      TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
                              VT);
      break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
{
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
                                  bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
   EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
   APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
   Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
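  // For example, on such a 32-bit target a v2i64 splat of a value C is built
  // as a v4i32 BUILD_VECTOR holding C's low and high halves and then bitcast
  // back to v2i64.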
1127   else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1128            TLI->getTypeAction(*getContext(), EltVT) ==
1129            TargetLowering::TypeExpandInteger) {
1130     APInt NewVal = Elt->getValue();
1131     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1132     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1133     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1134     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1135 
1136     // Check the temporary vector is the correct size. If this fails then
1137     // getTypeToTransformTo() probably returned a type whose size (in bits)
1138     // isn't a power-of-2 factor of the requested type size.
1139     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1140 
1141     SmallVector<SDValue, 2> EltParts;
1142     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1143       EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1144                                            .trunc(ViaEltSizeInBits),
1145                                      ViaEltVT, isT, isO));
1146     }
1147 
1148     // EltParts is currently in little endian order. If we actually want
1149     // big-endian order then reverse it now.
1150     if (TLI->isBigEndian())
1151       std::reverse(EltParts.begin(), EltParts.end());
1152 
1153     // The elements must be reversed when the element order is different
1154     // to the endianness of the elements (because the BITCAST is itself a
1155     // vector shuffle in this situation). However, we do not need any code to
1156     // perform this reversal because getConstant() is producing a vector
1157     // splat.
1158     // This situation occurs in MIPS MSA.
1159 
1160     SmallVector<SDValue, 8> Ops;
1161     for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
1162       Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1163 
1164     SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
1165                              getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
1166                                      Ops));
1167     return Result;
1168   }
1169 
1170   assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1171          "APInt size does not match type size!");
1172   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1173   FoldingSetNodeID ID;
1174   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1175   ID.AddPointer(Elt);
1176   ID.AddBoolean(isO);
1177   void *IP = nullptr;
1178   SDNode *N = nullptr;
1179   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
1180     if (!VT.isVector())
1181       return SDValue(N, 0);
1182 
1183   if (!N) {
1184     N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
1185     CSEMap.InsertNode(N, IP);
1186     InsertNode(N);
1187   }
1188 
1189   SDValue Result(N, 0);
1190   if (VT.isVector()) {
1191     SmallVector<SDValue, 8> Ops;
1192     Ops.assign(VT.getVectorNumElements(), Result);
1193     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1194   }
1195   return Result;
1196 }
1197 
getIntPtrConstant(uint64_t Val,bool isTarget)1198 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
1199   return getConstant(Val, TLI->getPointerTy(), isTarget);
1200 }
1201 
1202 
getConstantFP(const APFloat & V,EVT VT,bool isTarget)1203 SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
1204   return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
1205 }
1206 
getConstantFP(const ConstantFP & V,EVT VT,bool isTarget)1207 SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
1208   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1209 
1210   EVT EltVT = VT.getScalarType();
1211 
1212   // Do the map lookup using the actual bit pattern for the floating point
1213   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1214   // we don't have issues with SNANs.
1215   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1216   FoldingSetNodeID ID;
1217   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1218   ID.AddPointer(&V);
1219   void *IP = nullptr;
1220   SDNode *N = nullptr;
1221   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
1222     if (!VT.isVector())
1223       return SDValue(N, 0);
1224 
1225   if (!N) {
1226     N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
1227     CSEMap.InsertNode(N, IP);
1228     InsertNode(N);
1229   }
1230 
1231   SDValue Result(N, 0);
1232   if (VT.isVector()) {
1233     SmallVector<SDValue, 8> Ops;
1234     Ops.assign(VT.getVectorNumElements(), Result);
1235     // FIXME SDLoc info might be appropriate here
1236     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1237   }
1238   return Result;
1239 }
1240 
getConstantFP(double Val,EVT VT,bool isTarget)1241 SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
1242   EVT EltVT = VT.getScalarType();
1243   if (EltVT==MVT::f32)
1244     return getConstantFP(APFloat((float)Val), VT, isTarget);
1245   else if (EltVT==MVT::f64)
1246     return getConstantFP(APFloat(Val), VT, isTarget);
1247   else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
1248            EltVT==MVT::f16) {
1249     bool ignored;
1250     APFloat apf = APFloat(Val);
1251     apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1252                 &ignored);
1253     return getConstantFP(apf, VT, isTarget);
1254   } else
1255     llvm_unreachable("Unsupported type in getConstantFP");
1256 }
1257 
getGlobalAddress(const GlobalValue * GV,SDLoc DL,EVT VT,int64_t Offset,bool isTargetGA,unsigned char TargetFlags)1258 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
1259                                        EVT VT, int64_t Offset,
1260                                        bool isTargetGA,
1261                                        unsigned char TargetFlags) {
1262   assert((TargetFlags == 0 || isTargetGA) &&
1263          "Cannot set target flags on target-independent globals");
1264 
1265   // Truncate (with sign-extension) the offset value to the pointer size.
1266   unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
1267   if (BitWidth < 64)
1268     Offset = SignExtend64(Offset, BitWidth);
1269 
1270   unsigned Opc;
1271   if (GV->isThreadLocal())
1272     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1273   else
1274     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1275 
1276   FoldingSetNodeID ID;
1277   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1278   ID.AddPointer(GV);
1279   ID.AddInteger(Offset);
1280   ID.AddInteger(TargetFlags);
1281   ID.AddInteger(GV->getType()->getAddressSpace());
1282   void *IP = nullptr;
1283   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1284     return SDValue(E, 0);
1285 
1286   SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
1287                                                       DL.getDebugLoc(), GV, VT,
1288                                                       Offset, TargetFlags);
1289   CSEMap.InsertNode(N, IP);
1290     InsertNode(N);
1291   return SDValue(N, 0);
1292 }
1293 
getFrameIndex(int FI,EVT VT,bool isTarget)1294 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1295   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1296   FoldingSetNodeID ID;
1297   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1298   ID.AddInteger(FI);
1299   void *IP = nullptr;
1300   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1301     return SDValue(E, 0);
1302 
1303   SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
1304   CSEMap.InsertNode(N, IP);
1305   InsertNode(N);
1306   return SDValue(N, 0);
1307 }
1308 
getJumpTable(int JTI,EVT VT,bool isTarget,unsigned char TargetFlags)1309 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1310                                    unsigned char TargetFlags) {
1311   assert((TargetFlags == 0 || isTarget) &&
1312          "Cannot set target flags on target-independent jump tables");
1313   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1314   FoldingSetNodeID ID;
1315   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1316   ID.AddInteger(JTI);
1317   ID.AddInteger(TargetFlags);
1318   void *IP = nullptr;
1319   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1320     return SDValue(E, 0);
1321 
1322   SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
1323                                                   TargetFlags);
1324   CSEMap.InsertNode(N, IP);
1325   InsertNode(N);
1326   return SDValue(N, 0);
1327 }
1328 
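/// getConstantPool - Return a ConstantPool or TargetConstantPool node for the
/// constant C.  If Alignment is zero, the preferred alignment of C's type is
/// used.  An illustrative call (DAG and PtrVT assumed from the caller):
///   SDValue CP = DAG.getConstantPool(C, PtrVT, /*Alignment=*/0, /*Offset=*/0,
///                                    /*isTarget=*/false, /*TargetFlags=*/0);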
1329 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1330                                       unsigned Alignment, int Offset,
1331                                       bool isTarget,
1332                                       unsigned char TargetFlags) {
1333   assert((TargetFlags == 0 || isTarget) &&
1334          "Cannot set target flags on target-independent constant pools");
1335   if (Alignment == 0)
1336     Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
1337   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1338   FoldingSetNodeID ID;
1339   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1340   ID.AddInteger(Alignment);
1341   ID.AddInteger(Offset);
1342   ID.AddPointer(C);
1343   ID.AddInteger(TargetFlags);
1344   void *IP = nullptr;
1345   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1346     return SDValue(E, 0);
1347 
1348   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
1349                                                      Alignment, TargetFlags);
1350   CSEMap.InsertNode(N, IP);
1351   InsertNode(N);
1352   return SDValue(N, 0);
1353 }
1354 
1355 
1356 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1357                                       unsigned Alignment, int Offset,
1358                                       bool isTarget,
1359                                       unsigned char TargetFlags) {
1360   assert((TargetFlags == 0 || isTarget) &&
1361          "Cannot set target flags on target-independent constant pools");
1362   if (Alignment == 0)
1363     Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
1364   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1365   FoldingSetNodeID ID;
1366   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1367   ID.AddInteger(Alignment);
1368   ID.AddInteger(Offset);
1369   C->addSelectionDAGCSEId(ID);
1370   ID.AddInteger(TargetFlags);
1371   void *IP = nullptr;
1372   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1373     return SDValue(E, 0);
1374 
1375   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
1376                                                      Alignment, TargetFlags);
1377   CSEMap.InsertNode(N, IP);
1378   InsertNode(N);
1379   return SDValue(N, 0);
1380 }
1381 
1382 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1383                                      unsigned char TargetFlags) {
1384   FoldingSetNodeID ID;
1385   AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1386   ID.AddInteger(Index);
1387   ID.AddInteger(Offset);
1388   ID.AddInteger(TargetFlags);
1389   void *IP = nullptr;
1390   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1391     return SDValue(E, 0);
1392 
1393   SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
1394                                                     TargetFlags);
1395   CSEMap.InsertNode(N, IP);
1396   InsertNode(N);
1397   return SDValue(N, 0);
1398 }
1399 
1400 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1401   FoldingSetNodeID ID;
1402   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1403   ID.AddPointer(MBB);
1404   void *IP = nullptr;
1405   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1406     return SDValue(E, 0);
1407 
1408   SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
1409   CSEMap.InsertNode(N, IP);
1410   InsertNode(N);
1411   return SDValue(N, 0);
1412 }
1413 
1414 SDValue SelectionDAG::getValueType(EVT VT) {
1415   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1416       ValueTypeNodes.size())
1417     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1418 
1419   SDNode *&N = VT.isExtended() ?
1420     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1421 
1422   if (N) return SDValue(N, 0);
1423   N = new (NodeAllocator) VTSDNode(VT);
1424   InsertNode(N);
1425   return SDValue(N, 0);
1426 }
1427 
1428 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1429   SDNode *&N = ExternalSymbols[Sym];
1430   if (N) return SDValue(N, 0);
1431   N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
1432   InsertNode(N);
1433   return SDValue(N, 0);
1434 }
1435 
1436 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1437                                               unsigned char TargetFlags) {
1438   SDNode *&N =
1439     TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1440                                                                TargetFlags)];
1441   if (N) return SDValue(N, 0);
1442   N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
1443   InsertNode(N);
1444   return SDValue(N, 0);
1445 }
1446 
1447 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1448   if ((unsigned)Cond >= CondCodeNodes.size())
1449     CondCodeNodes.resize(Cond+1);
1450 
1451   if (!CondCodeNodes[Cond]) {
1452     CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
1453     CondCodeNodes[Cond] = N;
1454     InsertNode(N);
1455   }
1456 
1457   return SDValue(CondCodeNodes[Cond], 0);
1458 }
1459 
1460 // commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
1461 // the shuffle mask M that point at N1 to point at N2, and indices that point
1462 // at N2 to point at N1.
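// For example, with 4-element vectors, shuffle(A, B, <4,6,-1,7>) becomes
// shuffle(B, A, <0,2,-1,3>), which selects the same lanes.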
1463 static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
1464   std::swap(N1, N2);
1465   ShuffleVectorSDNode::commuteMask(M);
1466 }
1467 
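/// getVectorShuffle - Return an ISD::VECTOR_SHUFFLE of N1 and N2 using Mask,
/// first canonicalizing the operands and mask (undef inputs, self-shuffles,
/// splats, identity masks).  The result may therefore be UNDEF, one of the
/// inputs, or a BUILD_VECTOR rather than a new shuffle node.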
1468 SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
1469                                        SDValue N2, const int *Mask) {
1470   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1471          "Invalid VECTOR_SHUFFLE");
1472 
1473   // Canonicalize shuffle undef, undef -> undef
1474   if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
1475     return getUNDEF(VT);
1476 
1477   // Validate that all indices in Mask are within the range of the elements
1478   // input to the shuffle.
1479   unsigned NElts = VT.getVectorNumElements();
1480   SmallVector<int, 8> MaskVec;
1481   for (unsigned i = 0; i != NElts; ++i) {
1482     assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
1483     MaskVec.push_back(Mask[i]);
1484   }
1485 
1486   // Canonicalize shuffle v, v -> v, undef
1487   if (N1 == N2) {
1488     N2 = getUNDEF(VT);
1489     for (unsigned i = 0; i != NElts; ++i)
1490       if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
1491   }
1492 
1493   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
1494   if (N1.getOpcode() == ISD::UNDEF)
1495     commuteShuffle(N1, N2, MaskVec);
1496 
1497   // If shuffling a splat, try to blend the splat instead. We do this here so
1498   // that even when this arises during lowering we don't have to re-handle it.
1499   auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1500     BitVector UndefElements;
1501     SDValue Splat = BV->getSplatValue(&UndefElements);
1502     if (!Splat)
1503       return;
1504 
1505     for (int i = 0; i < (int)NElts; ++i) {
1506       if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + (int)NElts))
1507         continue;
1508 
1509       // If this input comes from undef, mark it as such.
1510       if (UndefElements[MaskVec[i] - Offset]) {
1511         MaskVec[i] = -1;
1512         continue;
1513       }
1514 
1515       // If we can blend a non-undef lane, use that instead.
1516       if (!UndefElements[i])
1517         MaskVec[i] = i + Offset;
1518     }
1519   };
1520   if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1521     BlendSplat(N1BV, 0);
1522   if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1523     BlendSplat(N2BV, NElts);
1524 
1525   // Canonicalize: if all indices point into the LHS, -> shuffle lhs, undef
1526   // Canonicalize: if all indices point into the RHS, -> shuffle rhs, undef
1527   bool AllLHS = true, AllRHS = true;
1528   bool N2Undef = N2.getOpcode() == ISD::UNDEF;
1529   for (unsigned i = 0; i != NElts; ++i) {
1530     if (MaskVec[i] >= (int)NElts) {
1531       if (N2Undef)
1532         MaskVec[i] = -1;
1533       else
1534         AllLHS = false;
1535     } else if (MaskVec[i] >= 0) {
1536       AllRHS = false;
1537     }
1538   }
1539   if (AllLHS && AllRHS)
1540     return getUNDEF(VT);
1541   if (AllLHS && !N2Undef)
1542     N2 = getUNDEF(VT);
1543   if (AllRHS) {
1544     N1 = getUNDEF(VT);
1545     commuteShuffle(N1, N2, MaskVec);
1546   }
1547   // Reset our undef status after accounting for the mask.
1548   N2Undef = N2.getOpcode() == ISD::UNDEF;
1549   // Re-check whether both sides ended up undef.
1550   if (N1.getOpcode() == ISD::UNDEF && N2Undef)
1551     return getUNDEF(VT);
1552 
1553   // If this is an identity shuffle, return the first operand.
1554   bool Identity = true, AllSame = true;
1555   for (unsigned i = 0; i != NElts; ++i) {
1556     if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
1557     if (MaskVec[i] != MaskVec[0]) AllSame = false;
1558   }
1559   if (Identity && NElts)
1560     return N1;
1561 
1562   // Shuffling a constant splat doesn't change the result.
1563   if (N2Undef) {
1564     SDValue V = N1;
1565 
1566     // Look through any bitcasts. We check that these don't change the number
1567     // (and size) of elements and just change their types.
1568     while (V.getOpcode() == ISD::BITCAST)
1569       V = V->getOperand(0);
1570 
1571     // A splat should always show up as a build vector node.
1572     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1573       BitVector UndefElements;
1574       SDValue Splat = BV->getSplatValue(&UndefElements);
1575       // If this is a splat of an undef, shuffling it is also undef.
1576       if (Splat && Splat.getOpcode() == ISD::UNDEF)
1577         return getUNDEF(VT);
1578 
1579       bool SameNumElts =
1580           V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1581 
1582       // The splat can only skip this shuffle if there is a splatted value and
1583       // no undef lanes are rearranged by the shuffle.
1584       if (Splat && UndefElements.none()) {
1585         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1586         // number of elements matches or the splatted value is a zero constant.
1587         if (SameNumElts)
1588           return N1;
1589         if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1590           if (C->isNullValue())
1591             return N1;
1592       }
1593 
1594       // If the shuffle itself creates a splat, build the vector directly.
1595       if (AllSame && SameNumElts) {
1596         const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1597         SmallVector<SDValue, 8> Ops(NElts, Splatted);
1598 
1599         EVT BuildVT = BV->getValueType(0);
1600         SDValue NewBV = getNode(ISD::BUILD_VECTOR, dl, BuildVT, Ops);
1601 
1602         // We may have jumped through bitcasts, so the type of the
1603         // BUILD_VECTOR may not match the type of the shuffle.
1604         if (BuildVT != VT)
1605           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1606         return NewBV;
1607       }
1608     }
1609   }
1610 
1611   FoldingSetNodeID ID;
1612   SDValue Ops[2] = { N1, N2 };
1613   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1614   for (unsigned i = 0; i != NElts; ++i)
1615     ID.AddInteger(MaskVec[i]);
1616 
1617   void* IP = nullptr;
1618   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1619     return SDValue(E, 0);
1620 
1621   // Allocate the mask array for the node out of the BumpPtrAllocator, since
1622   // SDNode doesn't have access to it.  This memory will be "leaked" when
1623   // the node is deallocated, but recovered when the NodeAllocator is released.
1624   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1625   memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
1626 
1627   ShuffleVectorSDNode *N =
1628     new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
1629                                             dl.getDebugLoc(), N1, N2,
1630                                             MaskAlloc);
1631   CSEMap.InsertNode(N, IP);
1632   InsertNode(N);
1633   return SDValue(N, 0);
1634 }
1635 
1636 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1637   MVT VT = SV.getSimpleValueType(0);
1638   SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1639   ShuffleVectorSDNode::commuteMask(MaskVec);
1640 
1641   SDValue Op0 = SV.getOperand(0);
1642   SDValue Op1 = SV.getOperand(1);
1643   return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]);
1644 }
1645 
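/// getConvertRndSat - Return an ISD::CONVERT_RNDSAT node, or Val unchanged
/// when the source and destination types are identical and the conversion
/// code (CVT_UU, CVT_SS or CVT_FF) cannot alter the value.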
1646 SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
1647                                        SDValue Val, SDValue DTy,
1648                                        SDValue STy, SDValue Rnd, SDValue Sat,
1649                                        ISD::CvtCode Code) {
1650   // If the src and dest types are the same and the conversion is between
1651   // integer types of the same sign or two floats, no conversion is necessary.
1652   if (DTy == STy &&
1653       (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
1654     return Val;
1655 
1656   FoldingSetNodeID ID;
1657   SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
1658   AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
1659   void* IP = nullptr;
1660   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1661     return SDValue(E, 0);
1662 
1663   CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
1664                                                            dl.getDebugLoc(),
1665                                                            Ops, Code);
1666   CSEMap.InsertNode(N, IP);
1667   InsertNode(N);
1668   return SDValue(N, 0);
1669 }
1670 
1671 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1672   FoldingSetNodeID ID;
1673   AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1674   ID.AddInteger(RegNo);
1675   void *IP = nullptr;
1676   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1677     return SDValue(E, 0);
1678 
1679   SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
1680   CSEMap.InsertNode(N, IP);
1681   InsertNode(N);
1682   return SDValue(N, 0);
1683 }
1684 
1685 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1686   FoldingSetNodeID ID;
1687   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1688   ID.AddPointer(RegMask);
1689   void *IP = nullptr;
1690   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1691     return SDValue(E, 0);
1692 
1693   SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
1694   CSEMap.InsertNode(N, IP);
1695   InsertNode(N);
1696   return SDValue(N, 0);
1697 }
1698 
1699 SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
1700   FoldingSetNodeID ID;
1701   SDValue Ops[] = { Root };
1702   AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
1703   ID.AddPointer(Label);
1704   void *IP = nullptr;
1705   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1706     return SDValue(E, 0);
1707 
1708   SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
1709                                                 dl.getDebugLoc(), Root, Label);
1710   CSEMap.InsertNode(N, IP);
1711   InsertNode(N);
1712   return SDValue(N, 0);
1713 }
1714 
1715 
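/// getBlockAddress - Return a BlockAddress or TargetBlockAddress node for BA
/// plus Offset, uniqued through the CSE map.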
1716 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1717                                       int64_t Offset,
1718                                       bool isTarget,
1719                                       unsigned char TargetFlags) {
1720   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1721 
1722   FoldingSetNodeID ID;
1723   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1724   ID.AddPointer(BA);
1725   ID.AddInteger(Offset);
1726   ID.AddInteger(TargetFlags);
1727   void *IP = nullptr;
1728   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1729     return SDValue(E, 0);
1730 
1731   SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
1732                                                      TargetFlags);
1733   CSEMap.InsertNode(N, IP);
1734   InsertNode(N);
1735   return SDValue(N, 0);
1736 }
1737 
1738 SDValue SelectionDAG::getSrcValue(const Value *V) {
1739   assert((!V || V->getType()->isPointerTy()) &&
1740          "SrcValue is not a pointer?");
1741 
1742   FoldingSetNodeID ID;
1743   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1744   ID.AddPointer(V);
1745 
1746   void *IP = nullptr;
1747   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1748     return SDValue(E, 0);
1749 
1750   SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
1751   CSEMap.InsertNode(N, IP);
1752   InsertNode(N);
1753   return SDValue(N, 0);
1754 }
1755 
1756 /// getMDNode - Return an MDNodeSDNode which holds an MDNode.
1757 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1758   FoldingSetNodeID ID;
1759   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1760   ID.AddPointer(MD);
1761 
1762   void *IP = nullptr;
1763   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1764     return SDValue(E, 0);
1765 
1766   SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
1767   CSEMap.InsertNode(N, IP);
1768   InsertNode(N);
1769   return SDValue(N, 0);
1770 }
1771 
1772 /// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
1773 SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
1774                                        unsigned SrcAS, unsigned DestAS) {
1775   SDValue Ops[] = {Ptr};
1776   FoldingSetNodeID ID;
1777   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1778   ID.AddInteger(SrcAS);
1779   ID.AddInteger(DestAS);
1780 
1781   void *IP = nullptr;
1782   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1783     return SDValue(E, 0);
1784 
1785   SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
1786                                                       dl.getDebugLoc(),
1787                                                       VT, Ptr, SrcAS, DestAS);
1788   CSEMap.InsertNode(N, IP);
1789   InsertNode(N);
1790   return SDValue(N, 0);
1791 }
1792 
1793 /// getShiftAmountOperand - Return the specified value cast to
1794 /// the target's desired shift amount type.
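/// If Op already has the shift amount type, or is a vector, it is returned
/// unchanged; otherwise it is truncated or zero-extended as needed.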
1795 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1796   EVT OpTy = Op.getValueType();
1797   EVT ShTy = TLI->getShiftAmountTy(LHSTy);
1798   if (OpTy == ShTy || OpTy.isVector()) return Op;
1799 
1800   ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ?  ISD::TRUNCATE : ISD::ZERO_EXTEND;
1801   return getNode(Opcode, SDLoc(Op), ShTy, Op);
1802 }
1803 
1804 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
1805 /// specified value type.
1806 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1807   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1808   unsigned ByteSize = VT.getStoreSize();
1809   Type *Ty = VT.getTypeForEVT(*getContext());
1810   unsigned StackAlign =
1811   std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
1812 
1813   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
1814   return getFrameIndex(FrameIdx, TLI->getPointerTy());
1815 }
1816 
1817 /// CreateStackTemporary - Create a stack temporary suitable for holding
1818 /// either of the specified value types.
1819 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1820   unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
1821                             VT2.getStoreSizeInBits())/8;
1822   Type *Ty1 = VT1.getTypeForEVT(*getContext());
1823   Type *Ty2 = VT2.getTypeForEVT(*getContext());
1824   const DataLayout *TD = TLI->getDataLayout();
1825   unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
1826                             TD->getPrefTypeAlignment(Ty2));
1827 
1828   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1829   int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
1830   return getFrameIndex(FrameIdx, TLI->getPointerTy());
1831 }
1832 
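/// FoldSetCC - Try to constant-fold a setcc with the given operands and
/// condition code.  Returns the folded value (possibly UNDEF for unordered FP
/// comparisons), a setcc canonicalized to place the constant on the RHS, or an
/// empty SDValue if nothing could be folded.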
1833 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
1834                                 SDValue N2, ISD::CondCode Cond, SDLoc dl) {
1835   // These setcc operations always fold.
1836   switch (Cond) {
1837   default: break;
1838   case ISD::SETFALSE:
1839   case ISD::SETFALSE2: return getConstant(0, VT);
1840   case ISD::SETTRUE:
1841   case ISD::SETTRUE2: {
1842     TargetLowering::BooleanContent Cnt =
1843         TLI->getBooleanContents(N1->getValueType(0));
1844     return getConstant(
1845         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
1846   }
1847 
1848   case ISD::SETOEQ:
1849   case ISD::SETOGT:
1850   case ISD::SETOGE:
1851   case ISD::SETOLT:
1852   case ISD::SETOLE:
1853   case ISD::SETONE:
1854   case ISD::SETO:
1855   case ISD::SETUO:
1856   case ISD::SETUEQ:
1857   case ISD::SETUNE:
1858     assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1859     break;
1860   }
1861 
1862   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
1863     const APInt &C2 = N2C->getAPIntValue();
1864     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1865       const APInt &C1 = N1C->getAPIntValue();
1866 
1867       switch (Cond) {
1868       default: llvm_unreachable("Unknown integer setcc!");
1869       case ISD::SETEQ:  return getConstant(C1 == C2, VT);
1870       case ISD::SETNE:  return getConstant(C1 != C2, VT);
1871       case ISD::SETULT: return getConstant(C1.ult(C2), VT);
1872       case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
1873       case ISD::SETULE: return getConstant(C1.ule(C2), VT);
1874       case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
1875       case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
1876       case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
1877       case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
1878       case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
1879       }
1880     }
1881   }
1882   if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1883     if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
1884       APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1885       switch (Cond) {
1886       default: break;
1887       case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
1888                           return getUNDEF(VT);
1889                         // fall through
1890       case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
1891       case ISD::SETNE:  if (R==APFloat::cmpUnordered)
1892                           return getUNDEF(VT);
1893                         // fall through
1894       case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1895                                            R==APFloat::cmpLessThan, VT);
1896       case ISD::SETLT:  if (R==APFloat::cmpUnordered)
1897                           return getUNDEF(VT);
1898                         // fall through
1899       case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
1900       case ISD::SETGT:  if (R==APFloat::cmpUnordered)
1901                           return getUNDEF(VT);
1902                         // fall through
1903       case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
1904       case ISD::SETLE:  if (R==APFloat::cmpUnordered)
1905                           return getUNDEF(VT);
1906                         // fall through
1907       case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1908                                            R==APFloat::cmpEqual, VT);
1909       case ISD::SETGE:  if (R==APFloat::cmpUnordered)
1910                           return getUNDEF(VT);
1911                         // fall through
1912       case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1913                                            R==APFloat::cmpEqual, VT);
1914       case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
1915       case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
1916       case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1917                                            R==APFloat::cmpEqual, VT);
1918       case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
1919       case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1920                                            R==APFloat::cmpLessThan, VT);
1921       case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1922                                            R==APFloat::cmpUnordered, VT);
1923       case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
1924       case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
1925       }
1926     } else {
1927       // Ensure that the constant occurs on the RHS.
1928       ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
1929       MVT CompVT = N1.getValueType().getSimpleVT();
1930       if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
1931         return SDValue();
1932 
1933       return getSetCC(dl, VT, N2, N1, SwappedCond);
1934     }
1935   }
1936 
1937   // Could not fold it.
1938   return SDValue();
1939 }
1940 
1941 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
1942 /// use this predicate to simplify operations downstream.
1943 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1944   // This predicate is not safe for vector operations.
1945   if (Op.getValueType().isVector())
1946     return false;
1947 
1948   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1949   return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1950 }
1951 
1952 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
1953 /// this predicate to simplify operations downstream.  Mask is known to be zero
1954 /// for bits that V cannot have.
1955 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1956                                      unsigned Depth) const {
1957   APInt KnownZero, KnownOne;
1958   computeKnownBits(Op, KnownZero, KnownOne, Depth);
1959   return (KnownZero & Mask) == Mask;
1960 }
1961 
1962 /// Determine which bits of Op are known to be either zero or one and return
1963 /// them in the KnownZero/KnownOne bitsets.
1964 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
1965                                     APInt &KnownOne, unsigned Depth) const {
1966   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1967 
1968   KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
1969   if (Depth == 6)
1970     return;  // Limit search depth.
1971 
1972   APInt KnownZero2, KnownOne2;
1973 
1974   switch (Op.getOpcode()) {
1975   case ISD::Constant:
1976     // We know all of the bits for a constant!
1977     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1978     KnownZero = ~KnownOne;
1979     break;
1980   case ISD::AND:
1981     // If either the LHS or the RHS are Zero, the result is zero.
1982     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1983     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1984 
1985     // Output known-1 bits are only known if set in both the LHS & RHS.
1986     KnownOne &= KnownOne2;
1987     // Output known-0 bits are known to be clear if zero in either the LHS | RHS.
1988     KnownZero |= KnownZero2;
1989     break;
1990   case ISD::OR:
1991     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1992     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1993 
1994     // Output known-0 bits are only known if clear in both the LHS & RHS.
1995     KnownZero &= KnownZero2;
1996     // Output known-1 bits are known to be set if set in either the LHS | RHS.
1997     KnownOne |= KnownOne2;
1998     break;
1999   case ISD::XOR: {
2000     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2001     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2002 
2003     // Output known-0 bits are known if clear or set in both the LHS & RHS.
2004     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
2005     // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
2006     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
2007     KnownZero = KnownZeroOut;
2008     break;
2009   }
2010   case ISD::MUL: {
2011     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2012     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2013 
2014     // If low bits are zero in either operand, output low known-0 bits.
2015     // Also compute a conservative estimate for high known-0 bits.
2016     // More trickiness is possible, but this is sufficient for the
2017     // interesting case of alignment computation.
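    // For example, if both operands are known multiples of 4 (two trailing
    // zero bits each), the product is known to be a multiple of 16.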
2018     KnownOne.clearAllBits();
2019     unsigned TrailZ = KnownZero.countTrailingOnes() +
2020                       KnownZero2.countTrailingOnes();
2021     unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
2022                                KnownZero2.countLeadingOnes(),
2023                                BitWidth) - BitWidth;
2024 
2025     TrailZ = std::min(TrailZ, BitWidth);
2026     LeadZ = std::min(LeadZ, BitWidth);
2027     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
2028                 APInt::getHighBitsSet(BitWidth, LeadZ);
2029     break;
2030   }
2031   case ISD::UDIV: {
2032     // For the purposes of computing leading zeros we can conservatively
2033     // treat a udiv as a logical right shift by the power of 2 known to
2034     // be less than the denominator.
2035     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2036     unsigned LeadZ = KnownZero2.countLeadingOnes();
2037 
2038     KnownOne2.clearAllBits();
2039     KnownZero2.clearAllBits();
2040     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2041     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
2042     if (RHSUnknownLeadingOnes != BitWidth)
2043       LeadZ = std::min(BitWidth,
2044                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
2045 
2046     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
2047     break;
2048   }
2049   case ISD::SELECT:
2050     computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
2051     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2052 
2053     // Only known if known in both the LHS and RHS.
2054     KnownOne &= KnownOne2;
2055     KnownZero &= KnownZero2;
2056     break;
2057   case ISD::SELECT_CC:
2058     computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
2059     computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
2060 
2061     // Only known if known in both the LHS and RHS.
2062     KnownOne &= KnownOne2;
2063     KnownZero &= KnownZero2;
2064     break;
2065   case ISD::SADDO:
2066   case ISD::UADDO:
2067   case ISD::SSUBO:
2068   case ISD::USUBO:
2069   case ISD::SMULO:
2070   case ISD::UMULO:
2071     if (Op.getResNo() != 1)
2072       break;
2073     // The boolean result conforms to getBooleanContents.
2074     // If we know the result of a setcc has the top bits zero, use this info.
2075     // We know that we have an integer-based boolean since these operations
2076     // are only available for integers.
2077     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2078             TargetLowering::ZeroOrOneBooleanContent &&
2079         BitWidth > 1)
2080       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2081     break;
2082   case ISD::SETCC:
2083     // If we know the result of a setcc has the top bits zero, use this info.
2084     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2085             TargetLowering::ZeroOrOneBooleanContent &&
2086         BitWidth > 1)
2087       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2088     break;
2089   case ISD::SHL:
2090     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
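    // For example, if the low four bits of X are known zero, then after
    // 'shl X, 2' the low six bits are known zero.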
2091     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2092       unsigned ShAmt = SA->getZExtValue();
2093 
2094       // If the shift count is an invalid immediate, don't do anything.
2095       if (ShAmt >= BitWidth)
2096         break;
2097 
2098       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2099       KnownZero <<= ShAmt;
2100       KnownOne  <<= ShAmt;
2101       // low bits known zero.
2102       KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
2103     }
2104     break;
2105   case ISD::SRL:
2106     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
2107     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2108       unsigned ShAmt = SA->getZExtValue();
2109 
2110       // If the shift count is an invalid immediate, don't do anything.
2111       if (ShAmt >= BitWidth)
2112         break;
2113 
2114       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2115       KnownZero = KnownZero.lshr(ShAmt);
2116       KnownOne  = KnownOne.lshr(ShAmt);
2117 
2118       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2119       KnownZero |= HighBits;  // High bits known zero.
2120     }
2121     break;
2122   case ISD::SRA:
2123     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2124       unsigned ShAmt = SA->getZExtValue();
2125 
2126       // If the shift count is an invalid immediate, don't do anything.
2127       if (ShAmt >= BitWidth)
2128         break;
2129 
2130       // If any of the demanded bits are produced by the sign extension, we also
2131       // demand the input sign bit.
2132       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2133 
2134       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2135       KnownZero = KnownZero.lshr(ShAmt);
2136       KnownOne  = KnownOne.lshr(ShAmt);
2137 
2138       // Handle the sign bits.
2139       APInt SignBit = APInt::getSignBit(BitWidth);
2140       SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.
2141 
2142       if (KnownZero.intersects(SignBit)) {
2143         KnownZero |= HighBits;  // New bits are known zero.
2144       } else if (KnownOne.intersects(SignBit)) {
2145         KnownOne  |= HighBits;  // New bits are known one.
2146       }
2147     }
2148     break;
2149   case ISD::SIGN_EXTEND_INREG: {
2150     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2151     unsigned EBits = EVT.getScalarType().getSizeInBits();
2152 
2153     // Sign extension.  Compute the demanded bits in the result that are not
2154     // present in the input.
2155     APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2156 
2157     APInt InSignBit = APInt::getSignBit(EBits);
2158     APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2159 
2160     // If the sign extended bits are demanded, we know that the sign
2161     // bit is demanded.
2162     InSignBit = InSignBit.zext(BitWidth);
2163     if (NewBits.getBoolValue())
2164       InputDemandedBits |= InSignBit;
2165 
2166     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2167     KnownOne &= InputDemandedBits;
2168     KnownZero &= InputDemandedBits;
2169 
2170     // If the sign bit of the input is known set or clear, then we know the
2171     // top bits of the result.
2172     if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
2173       KnownZero |= NewBits;
2174       KnownOne  &= ~NewBits;
2175     } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
2176       KnownOne  |= NewBits;
2177       KnownZero &= ~NewBits;
2178     } else {                              // Input sign bit unknown
2179       KnownZero &= ~NewBits;
2180       KnownOne  &= ~NewBits;
2181     }
2182     break;
2183   }
2184   case ISD::CTTZ:
2185   case ISD::CTTZ_ZERO_UNDEF:
2186   case ISD::CTLZ:
2187   case ISD::CTLZ_ZERO_UNDEF:
2188   case ISD::CTPOP: {
2189     unsigned LowBits = Log2_32(BitWidth)+1;
2190     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
2191     KnownOne.clearAllBits();
2192     break;
2193   }
2194   case ISD::LOAD: {
2195     LoadSDNode *LD = cast<LoadSDNode>(Op);
2196     // If this is a ZEXTLoad and we are looking at the loaded value.
2197     if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2198       EVT VT = LD->getMemoryVT();
2199       unsigned MemBits = VT.getScalarType().getSizeInBits();
2200       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2201     } else if (const MDNode *Ranges = LD->getRanges()) {
2202       computeKnownBitsFromRangeMetadata(*Ranges, KnownZero);
2203     }
2204     break;
2205   }
2206   case ISD::ZERO_EXTEND: {
2207     EVT InVT = Op.getOperand(0).getValueType();
2208     unsigned InBits = InVT.getScalarType().getSizeInBits();
2209     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2210     KnownZero = KnownZero.trunc(InBits);
2211     KnownOne = KnownOne.trunc(InBits);
2212     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2213     KnownZero = KnownZero.zext(BitWidth);
2214     KnownOne = KnownOne.zext(BitWidth);
2215     KnownZero |= NewBits;
2216     break;
2217   }
2218   case ISD::SIGN_EXTEND: {
2219     EVT InVT = Op.getOperand(0).getValueType();
2220     unsigned InBits = InVT.getScalarType().getSizeInBits();
2221     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2222 
2223     KnownZero = KnownZero.trunc(InBits);
2224     KnownOne = KnownOne.trunc(InBits);
2225     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2226 
2227     // Note if the sign bit is known to be zero or one.
2228     bool SignBitKnownZero = KnownZero.isNegative();
2229     bool SignBitKnownOne  = KnownOne.isNegative();
2230 
2231     KnownZero = KnownZero.zext(BitWidth);
2232     KnownOne = KnownOne.zext(BitWidth);
2233 
2234     // If the sign bit is known zero or one, the top bits match.
2235     if (SignBitKnownZero)
2236       KnownZero |= NewBits;
2237     else if (SignBitKnownOne)
2238       KnownOne  |= NewBits;
2239     break;
2240   }
2241   case ISD::ANY_EXTEND: {
2242     EVT InVT = Op.getOperand(0).getValueType();
2243     unsigned InBits = InVT.getScalarType().getSizeInBits();
2244     KnownZero = KnownZero.trunc(InBits);
2245     KnownOne = KnownOne.trunc(InBits);
2246     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2247     KnownZero = KnownZero.zext(BitWidth);
2248     KnownOne = KnownOne.zext(BitWidth);
2249     break;
2250   }
2251   case ISD::TRUNCATE: {
2252     EVT InVT = Op.getOperand(0).getValueType();
2253     unsigned InBits = InVT.getScalarType().getSizeInBits();
2254     KnownZero = KnownZero.zext(InBits);
2255     KnownOne = KnownOne.zext(InBits);
2256     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2257     KnownZero = KnownZero.trunc(BitWidth);
2258     KnownOne = KnownOne.trunc(BitWidth);
2259     break;
2260   }
2261   case ISD::AssertZext: {
2262     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2263     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2264     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2265     KnownZero |= (~InMask);
2266     KnownOne  &= (~KnownZero);
2267     break;
2268   }
2269   case ISD::FGETSIGN:
2270     // All bits are zero except the low bit.
2271     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2272     break;
2273 
2274   case ISD::SUB: {
2275     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2276       // We know that the top bits of C-X are clear if X contains fewer bits
2277       // than C (i.e. no wrap-around can happen).  For example, 20-X is
2278       // positive if we can prove that X is >= 0 and < 16.
2279       if (CLHS->getAPIntValue().isNonNegative()) {
2280         unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2281         // NLZ can't be BitWidth, because C is non-negative and so C+1 is non-zero.
2282         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2283         computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2284 
2285         // If all of the MaskV bits are known to be zero, then we know the
2286         // output top bits are zero, because we now know that the output is
2287         // from [0-C].
2288         if ((KnownZero2 & MaskV) == MaskV) {
2289           unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2290           // Top bits known zero.
2291           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2292         }
2293       }
2294     }
2295   }
2296   // fall through
2297   case ISD::ADD:
2298   case ISD::ADDE: {
2299     // The low output bits are known to be zero up to the number of low zero
2300     // bits common to both the LHS & RHS.  For example, 8+(X<<3) is known to
2301     // have the low 3 bits clear.
2302     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2303     unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2304 
2305     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2306     KnownZeroOut = std::min(KnownZeroOut,
2307                             KnownZero2.countTrailingOnes());
2308 
2309     if (Op.getOpcode() == ISD::ADD) {
2310       KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2311       break;
2312     }
2313 
2314     // With ADDE, a carry bit may be added in, so we can only use this
2315     // information if we know (at least) that the low two bits are clear.  We
2316     // then return to the caller that the low bit is unknown but that other bits
2317     // are known zero.
2318     if (KnownZeroOut >= 2) // ADDE
2319       KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
2320     break;
2321   }
2322   case ISD::SREM:
2323     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2324       const APInt &RA = Rem->getAPIntValue().abs();
2325       if (RA.isPowerOf2()) {
2326         APInt LowBits = RA - 1;
2327         computeKnownBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
2328 
2329         // The low bits of the first operand are unchanged by the srem.
2330         KnownZero = KnownZero2 & LowBits;
2331         KnownOne = KnownOne2 & LowBits;
2332 
2333         // If the first operand is non-negative or has all low bits zero, then
2334         // the upper bits are all zero.
2335         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2336           KnownZero |= ~LowBits;
2337 
2338         // If the first operand is negative and not all low bits are zero, then
2339         // the upper bits are all one.
2340         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2341           KnownOne |= ~LowBits;
2342         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2343       }
2344     }
2345     break;
2346   case ISD::UREM: {
2347     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2348       const APInt &RA = Rem->getAPIntValue();
2349       if (RA.isPowerOf2()) {
2350         APInt LowBits = (RA - 1);
2351         computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
2352 
2353         // The upper bits are all zero, the lower ones are unchanged.
2354         KnownZero = KnownZero2 | ~LowBits;
2355         KnownOne = KnownOne2 & LowBits;
2356         break;
2357       }
2358     }
2359 
2360     // Since the result is less than or equal to either operand, any leading
2361     // zero bits in either operand must also exist in the result.
2362     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2363     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2364 
2365     uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2366                                 KnownZero2.countLeadingOnes());
2367     KnownOne.clearAllBits();
2368     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2369     break;
2370   }
2371   case ISD::EXTRACT_ELEMENT: {
2372     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2373     const unsigned Index =
2374       cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2375     const unsigned BitWidth = Op.getValueType().getSizeInBits();
2376 
2377     // Remove low part of known bits mask
2378     KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
2379     KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);
2380 
2381     // Remove high part of known bit mask
2382     KnownZero = KnownZero.trunc(BitWidth);
2383     KnownOne = KnownOne.trunc(BitWidth);
2384     break;
2385   }
2386   case ISD::FrameIndex:
2387   case ISD::TargetFrameIndex:
2388     if (unsigned Align = InferPtrAlignment(Op)) {
2389       // The low bits are known zero if the pointer is aligned.
2390       KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2391       break;
2392     }
2393     break;
2394 
2395   default:
2396     if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2397       break;
2398     // Fallthrough
2399   case ISD::INTRINSIC_WO_CHAIN:
2400   case ISD::INTRINSIC_W_CHAIN:
2401   case ISD::INTRINSIC_VOID:
2402     // Allow the target to implement this method for its nodes.
2403     TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2404     break;
2405   }
2406 
2407   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2408 }
2409 
2410 /// ComputeNumSignBits - Return the number of times the sign bit of the
2411 /// register is replicated into the other bits.  We know that at least 1 bit
2412 /// is always equal to the sign bit (itself), but other cases can give us
2413 /// information.  For example, immediately after an "SRA X, 2", we know that
2414 /// the top 3 bits are all equal to each other, so we return 3.
2415 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
2416   EVT VT = Op.getValueType();
2417   assert(VT.isInteger() && "Invalid VT!");
2418   unsigned VTBits = VT.getScalarType().getSizeInBits();
2419   unsigned Tmp, Tmp2;
2420   unsigned FirstAnswer = 1;
2421 
2422   if (Depth == 6)
2423     return 1;  // Limit search depth.
2424 
2425   switch (Op.getOpcode()) {
2426   default: break;
2427   case ISD::AssertSext:
2428     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2429     return VTBits-Tmp+1;
2430   case ISD::AssertZext:
2431     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2432     return VTBits-Tmp;
2433 
2434   case ISD::Constant: {
2435     const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2436     return Val.getNumSignBits();
2437   }
2438 
2439   case ISD::SIGN_EXTEND:
2440     Tmp =
2441         VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2442     return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2443 
2444   case ISD::SIGN_EXTEND_INREG:
2445     // Max of the input and what this extends.
2446     Tmp =
2447       cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2448     Tmp = VTBits-Tmp+1;
2449 
2450     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2451     return std::max(Tmp, Tmp2);
2452 
2453   case ISD::SRA:
2454     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2455     // SRA X, C   -> adds C sign bits.
2456     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2457       Tmp += C->getZExtValue();
2458       if (Tmp > VTBits) Tmp = VTBits;
2459     }
2460     return Tmp;
2461   case ISD::SHL:
2462     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2463       // shl destroys sign bits.
2464       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2465       if (C->getZExtValue() >= VTBits ||      // Bad shift.
2466           C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
2467       return Tmp - C->getZExtValue();
2468     }
2469     break;
2470   case ISD::AND:
2471   case ISD::OR:
2472   case ISD::XOR:    // NOT is handled here.
2473     // Logical binary ops preserve the number of sign bits at the worst.
2474     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2475     if (Tmp != 1) {
2476       Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2477       FirstAnswer = std::min(Tmp, Tmp2);
2478       // We computed what we know about the sign bits as our first
2479       // answer. Now proceed to the generic code that uses
2480       // computeKnownBits, and pick whichever answer is better.
2481     }
2482     break;
2483 
2484   case ISD::SELECT:
2485     Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2486     if (Tmp == 1) return 1;  // Early out.
2487     Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2488     return std::min(Tmp, Tmp2);
2489 
2490   case ISD::SADDO:
2491   case ISD::UADDO:
2492   case ISD::SSUBO:
2493   case ISD::USUBO:
2494   case ISD::SMULO:
2495   case ISD::UMULO:
2496     if (Op.getResNo() != 1)
2497       break;
2498     // The boolean result conforms to getBooleanContents.  Fall through.
2499     // If setcc returns 0/-1, all bits are sign bits.
2500     // We know that we have an integer-based boolean since these operations
2501     // are only available for integers.
2502     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2503         TargetLowering::ZeroOrNegativeOneBooleanContent)
2504       return VTBits;
2505     break;
2506   case ISD::SETCC:
2507     // If setcc returns 0/-1, all bits are sign bits.
2508     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2509         TargetLowering::ZeroOrNegativeOneBooleanContent)
2510       return VTBits;
2511     break;
2512   case ISD::ROTL:
2513   case ISD::ROTR:
2514     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2515       unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2516 
2517       // Handle rotate right by N like a rotate left by VTBits-N.
2518       if (Op.getOpcode() == ISD::ROTR)
2519         RotAmt = (VTBits-RotAmt) & (VTBits-1);
2520 
2521       // If we aren't rotating out all of the known-in sign bits, return the
2522       // number that are left.  This handles rotl(sext(x), 1) for example.
2523       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2524       if (Tmp > RotAmt+1) return Tmp-RotAmt;
2525     }
2526     break;
2527   case ISD::ADD:
2528     // Add can have at most one carry bit.  Thus we know that the output
2529     // is, at worst, one more bit than the inputs.
2530     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2531     if (Tmp == 1) return 1;  // Early out.
2532 
2533     // Special case decrementing a value (ADD X, -1):
2534     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2535       if (CRHS->isAllOnesValue()) {
2536         APInt KnownZero, KnownOne;
2537         computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2538 
2539         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2540         // sign bits set.
2541         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2542           return VTBits;
2543 
2544         // If we are subtracting one from a positive number, there is no carry
2545         // out of the result.
2546         if (KnownZero.isNegative())
2547           return Tmp;
2548       }
2549 
2550     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2551     if (Tmp2 == 1) return 1;
2552     return std::min(Tmp, Tmp2)-1;
2553 
2554   case ISD::SUB:
2555     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2556     if (Tmp2 == 1) return 1;
2557 
2558     // Handle NEG.
2559     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2560       if (CLHS->isNullValue()) {
2561         APInt KnownZero, KnownOne;
2562         computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2563         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2564         // sign bits set.
2565         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2566           return VTBits;
2567 
2568         // If the input is known to be positive (the sign bit is known clear),
2569         // the output of the NEG has the same number of sign bits as the input.
2570         if (KnownZero.isNegative())
2571           return Tmp2;
2572 
2573         // Otherwise, we treat this like a SUB.
2574       }
2575 
2576     // Sub can have at most one carry bit.  Thus we know that the output
2577     // is, at worst, one more bit than the inputs.
2578     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2579     if (Tmp == 1) return 1;  // Early out.
2580     return std::min(Tmp, Tmp2)-1;
2581   case ISD::TRUNCATE:
2582     // FIXME: it's tricky to do anything useful for this, but it is an important
2583     // case for targets like X86.
2584     break;
2585   case ISD::EXTRACT_ELEMENT: {
2586     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2587     const int BitWidth = Op.getValueType().getSizeInBits();
2588     const int Items =
2589       Op.getOperand(0).getValueType().getSizeInBits() / BitWidth;
2590 
2591     // Compute the reverse index: Op1 indexes elements from the little end,
2592     // while the sign bit sits at the big end.
2593     const int rIndex = Items - 1 -
2594       cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2595 
2596     // If the sign portion ends in our element, the subtraction gives the correct
2597     // result. Otherwise it gives either a negative result or one past the bit width.
2598     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
2599   }
2600   }
2601 
2602   // If we are looking at the loaded value of the SDNode.
2603   if (Op.getResNo() == 0) {
2604     // Handle LOADX separately here. EXTLOAD case will fall through.
2605     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2606       unsigned ExtType = LD->getExtensionType();
2607       switch (ExtType) {
2608         default: break;
2609         case ISD::SEXTLOAD:    // '17' bits known
2610           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2611           return VTBits-Tmp+1;
2612         case ISD::ZEXTLOAD:    // '16' bits known
2613           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2614           return VTBits-Tmp;
2615       }
2616     }
2617   }
2618 
2619   // Allow the target to implement this method for its nodes.
2620   if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2621       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2622       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2623       Op.getOpcode() == ISD::INTRINSIC_VOID) {
2624     unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
2625     if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2626   }
2627 
2628   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2629   // use this information.
2630   APInt KnownZero, KnownOne;
2631   computeKnownBits(Op, KnownZero, KnownOne, Depth);
2632 
2633   APInt Mask;
2634   if (KnownZero.isNegative()) {        // sign bit is 0
2635     Mask = KnownZero;
2636   } else if (KnownOne.isNegative()) {  // sign bit is 1;
2637     Mask = KnownOne;
2638   } else {
2639     // Nothing known.
2640     return FirstAnswer;
2641   }
2642 
2643   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2644   // the number of identical bits in the top of the input value.
2645   Mask = ~Mask;
2646   Mask <<= Mask.getBitWidth()-VTBits;
2647   // Return # leading zeros.  We use 'min' here in case Mask was zero before
2648   // shifting.  We don't want to return e.g. 64 for an i32 value of 0.
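  // For example, if the top 13 bits of an i32 value are known to be ones, ~Mask
  // has at least 13 leading zeros after the shift, so at least 13 sign bits are
  // reported.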
2649   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2650 }
2651 
2652 /// isBaseWithConstantOffset - Return true if the specified operand is an
2653 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2654 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2655 /// semantics as an ADD.  This handles the equivalence:
2656 ///     X|Cst == X+Cst iff X&Cst = 0.
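/// For example, (or (shl X, 2), 3) computes the same value as (add (shl X, 2), 3)
/// because the low two bits of (shl X, 2) are known to be zero.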
2657 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2658   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2659       !isa<ConstantSDNode>(Op.getOperand(1)))
2660     return false;
2661 
2662   if (Op.getOpcode() == ISD::OR &&
2663       !MaskedValueIsZero(Op.getOperand(0),
2664                      cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
2665     return false;
2666 
2667   return true;
2668 }
2669 
2670 
2671 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2672   // If we're told that NaNs won't happen, assume they won't.
2673   if (getTarget().Options.NoNaNsFPMath)
2674     return true;
2675 
2676   // If the value is a constant, we can obviously see if it is a NaN or not.
2677   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2678     return !C->getValueAPF().isNaN();
2679 
2680   // TODO: Recognize more cases here.
2681 
2682   return false;
2683 }
2684 
2685 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2686   // If the value is a constant, we can obviously see if it is a zero or not.
2687   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2688     return !C->isZero();
2689 
2690   // TODO: Recognize more cases here.
2691   switch (Op.getOpcode()) {
2692   default: break;
2693   case ISD::OR:
2694     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2695       return !C->isNullValue();
2696     break;
2697   }
2698 
2699   return false;
2700 }
2701 
2702 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2703   // Check the obvious case.
2704   if (A == B) return true;
2705 
2706   // Check for negative and positive zero.
2707   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2708     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2709       if (CA->isZero() && CB->isZero()) return true;
2710 
2711   // Otherwise they may not be equal.
2712   return false;
2713 }
2714 
2715 /// getNode - Gets or creates the specified node.
2716 ///
2717 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2718   FoldingSetNodeID ID;
2719   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
2720   void *IP = nullptr;
2721   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2722     return SDValue(E, 0);
2723 
2724   SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
2725                                          DL.getDebugLoc(), getVTList(VT));
2726   CSEMap.InsertNode(N, IP);
2727 
2728   InsertNode(N);
2729   return SDValue(N, 0);
2730 }
2731 
2732 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2733                               EVT VT, SDValue Operand) {
2734   // Constant fold unary operations with an integer constant operand. Even
2735   // opaque constants will be folded, because folding a unary operation
2736   // doesn't create a new constant with a different value. Nevertheless, the
2737   // opaque flag is preserved during folding to prevent future folding with
2738   // other constants.
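  // For example, getNode(ISD::TRUNCATE, DL, MVT::i8, getConstant(0x1234, MVT::i16))
  // folds to getConstant(0x34, MVT::i8) via the zextOrTrunc case below.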
2739   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2740     const APInt &Val = C->getAPIntValue();
2741     switch (Opcode) {
2742     default: break;
2743     case ISD::SIGN_EXTEND:
2744       return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
2745                          C->isTargetOpcode(), C->isOpaque());
2746     case ISD::ANY_EXTEND:
2747     case ISD::ZERO_EXTEND:
2748     case ISD::TRUNCATE:
2749       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
2750                          C->isTargetOpcode(), C->isOpaque());
2751     case ISD::UINT_TO_FP:
2752     case ISD::SINT_TO_FP: {
2753       APFloat apf(EVTToAPFloatSemantics(VT),
2754                   APInt::getNullValue(VT.getSizeInBits()));
2755       (void)apf.convertFromAPInt(Val,
2756                                  Opcode==ISD::SINT_TO_FP,
2757                                  APFloat::rmNearestTiesToEven);
2758       return getConstantFP(apf, VT);
2759     }
2760     case ISD::BITCAST:
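      // For example, bitcasting the i32 constant 0x3F800000 yields the f32
      // constant 1.0.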
2761       if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
2762         return getConstantFP(APFloat(APFloat::IEEEhalf, Val), VT);
2763       if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2764         return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2765       else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2766         return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
2767       break;
2768     case ISD::BSWAP:
2769       return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
2770                          C->isOpaque());
2771     case ISD::CTPOP:
2772       return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
2773                          C->isOpaque());
2774     case ISD::CTLZ:
2775     case ISD::CTLZ_ZERO_UNDEF:
2776       return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
2777                          C->isOpaque());
2778     case ISD::CTTZ:
2779     case ISD::CTTZ_ZERO_UNDEF:
2780       return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
2781                          C->isOpaque());
2782     }
2783   }
2784 
2785   // Constant fold unary operations with a floating point constant operand.
2786   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2787     APFloat V = C->getValueAPF();    // make copy
2788     switch (Opcode) {
2789     case ISD::FNEG:
2790       V.changeSign();
2791       return getConstantFP(V, VT);
2792     case ISD::FABS:
2793       V.clearSign();
2794       return getConstantFP(V, VT);
2795     case ISD::FCEIL: {
2796       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2797       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2798         return getConstantFP(V, VT);
2799       break;
2800     }
2801     case ISD::FTRUNC: {
2802       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2803       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2804         return getConstantFP(V, VT);
2805       break;
2806     }
2807     case ISD::FFLOOR: {
2808       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2809       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2810         return getConstantFP(V, VT);
2811       break;
2812     }
2813     case ISD::FP_EXTEND: {
2814       bool ignored;
2815       // This can return overflow, underflow, or inexact; we don't care.
2816       // FIXME need to be more flexible about rounding mode.
2817       (void)V.convert(EVTToAPFloatSemantics(VT),
2818                       APFloat::rmNearestTiesToEven, &ignored);
2819       return getConstantFP(V, VT);
2820     }
2821     case ISD::FP_TO_SINT:
2822     case ISD::FP_TO_UINT: {
2823       integerPart x[2];
2824       bool ignored;
2825       static_assert(integerPartWidth >= 64, "APFloat parts too small!");
2826       // FIXME need to be more flexible about rounding mode.
2827       APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2828                             Opcode==ISD::FP_TO_SINT,
2829                             APFloat::rmTowardZero, &ignored);
2830       if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
2831         break;
2832       APInt api(VT.getSizeInBits(), x);
2833       return getConstant(api, VT);
2834     }
2835     case ISD::BITCAST:
2836       if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
2837         return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), VT);
2838       else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2839         return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2840       else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2841         return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2842       break;
2843     }
2844   }
2845 
2846   // Constant fold unary operations with a vector integer or float operand.
2847   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand.getNode())) {
2848     if (BV->isConstant()) {
2849       switch (Opcode) {
2850       default:
2851         // FIXME: Entirely reasonable to perform folding of other unary
2852         // operations here as the need arises.
2853         break;
2854       case ISD::TRUNCATE:
2855         // Constant build vector truncation can be done with the original scalar
2856         // operands but with a new build vector with the truncated value type.
2857         return getNode(ISD::BUILD_VECTOR, DL, VT, BV->ops());
2858       case ISD::FNEG:
2859       case ISD::FABS:
2860       case ISD::FCEIL:
2861       case ISD::FTRUNC:
2862       case ISD::FFLOOR:
2863       case ISD::FP_EXTEND:
2864       case ISD::UINT_TO_FP:
2865       case ISD::SINT_TO_FP: {
2866         // Let the above scalar folding handle the folding of each element.
2867         SmallVector<SDValue, 8> Ops;
2868         for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
2869           SDValue OpN = BV->getOperand(i);
2870           OpN = getNode(Opcode, DL, VT.getVectorElementType(), OpN);
2871           if (OpN.getOpcode() != ISD::UNDEF &&
2872               OpN.getOpcode() != ISD::Constant &&
2873               OpN.getOpcode() != ISD::ConstantFP)
2874             break;
2875           Ops.push_back(OpN);
2876         }
2877         if (Ops.size() == VT.getVectorNumElements())
2878           return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
2879         break;
2880       }
2881       }
2882     }
2883   }
2884 
2885   unsigned OpOpcode = Operand.getNode()->getOpcode();
2886   switch (Opcode) {
2887   case ISD::TokenFactor:
2888   case ISD::MERGE_VALUES:
2889   case ISD::CONCAT_VECTORS:
2890     return Operand;         // Factor, merge or concat of one node?  No need.
2891   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2892   case ISD::FP_EXTEND:
2893     assert(VT.isFloatingPoint() &&
2894            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2895     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
2896     assert((!VT.isVector() ||
2897             VT.getVectorNumElements() ==
2898             Operand.getValueType().getVectorNumElements()) &&
2899            "Vector element count mismatch!");
2900     if (Operand.getOpcode() == ISD::UNDEF)
2901       return getUNDEF(VT);
2902     break;
2903   case ISD::SIGN_EXTEND:
2904     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2905            "Invalid SIGN_EXTEND!");
2906     if (Operand.getValueType() == VT) return Operand;   // noop extension
2907     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2908            "Invalid sext node, dst < src!");
2909     assert((!VT.isVector() ||
2910             VT.getVectorNumElements() ==
2911             Operand.getValueType().getVectorNumElements()) &&
2912            "Vector element count mismatch!");
2913     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2914       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2915     else if (OpOpcode == ISD::UNDEF)
2916       // sext(undef) = 0, because the top bits will all be the same.
2917       return getConstant(0, VT);
2918     break;
2919   case ISD::ZERO_EXTEND:
2920     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2921            "Invalid ZERO_EXTEND!");
2922     if (Operand.getValueType() == VT) return Operand;   // noop extension
2923     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2924            "Invalid zext node, dst < src!");
2925     assert((!VT.isVector() ||
2926             VT.getVectorNumElements() ==
2927             Operand.getValueType().getVectorNumElements()) &&
2928            "Vector element count mismatch!");
2929     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
2930       return getNode(ISD::ZERO_EXTEND, DL, VT,
2931                      Operand.getNode()->getOperand(0));
2932     else if (OpOpcode == ISD::UNDEF)
2933       // zext(undef) = 0, because the top bits will be zero.
2934       return getConstant(0, VT);
2935     break;
2936   case ISD::ANY_EXTEND:
2937     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2938            "Invalid ANY_EXTEND!");
2939     if (Operand.getValueType() == VT) return Operand;   // noop extension
2940     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2941            "Invalid anyext node, dst < src!");
2942     assert((!VT.isVector() ||
2943             VT.getVectorNumElements() ==
2944             Operand.getValueType().getVectorNumElements()) &&
2945            "Vector element count mismatch!");
2946 
2947     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2948         OpOpcode == ISD::ANY_EXTEND)
2949       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
2950       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2951     else if (OpOpcode == ISD::UNDEF)
2952       return getUNDEF(VT);
2953 
2954     // (ext (trunc x)) -> x
2955     if (OpOpcode == ISD::TRUNCATE) {
2956       SDValue OpOp = Operand.getNode()->getOperand(0);
2957       if (OpOp.getValueType() == VT)
2958         return OpOp;
2959     }
2960     break;
2961   case ISD::TRUNCATE:
2962     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2963            "Invalid TRUNCATE!");
2964     if (Operand.getValueType() == VT) return Operand;   // noop truncate
2965     assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2966            "Invalid truncate node, src < dst!");
2967     assert((!VT.isVector() ||
2968             VT.getVectorNumElements() ==
2969             Operand.getValueType().getVectorNumElements()) &&
2970            "Vector element count mismatch!");
2971     if (OpOpcode == ISD::TRUNCATE)
2972       return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2973     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2974         OpOpcode == ISD::ANY_EXTEND) {
2975       // If the source is smaller than the dest, we still need an extend.
2976       if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2977             .bitsLT(VT.getScalarType()))
2978         return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2979       if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2980         return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2981       return Operand.getNode()->getOperand(0);
2982     }
2983     if (OpOpcode == ISD::UNDEF)
2984       return getUNDEF(VT);
2985     break;
2986   case ISD::BITCAST:
2987     // Basic sanity checking.
2988     assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2989            && "Cannot BITCAST between types of different sizes!");
2990     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
2991     if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
2992       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2993     if (OpOpcode == ISD::UNDEF)
2994       return getUNDEF(VT);
2995     break;
2996   case ISD::SCALAR_TO_VECTOR:
2997     assert(VT.isVector() && !Operand.getValueType().isVector() &&
2998            (VT.getVectorElementType() == Operand.getValueType() ||
2999             (VT.getVectorElementType().isInteger() &&
3000              Operand.getValueType().isInteger() &&
3001              VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
3002            "Illegal SCALAR_TO_VECTOR node!");
3003     if (OpOpcode == ISD::UNDEF)
3004       return getUNDEF(VT);
3005     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
3006     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
3007         isa<ConstantSDNode>(Operand.getOperand(1)) &&
3008         Operand.getConstantOperandVal(1) == 0 &&
3009         Operand.getOperand(0).getValueType() == VT)
3010       return Operand.getOperand(0);
3011     break;
3012   case ISD::FNEG:
3013     // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
3014     if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
3015       return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
3016                      Operand.getNode()->getOperand(0));
3017     if (OpOpcode == ISD::FNEG)  // --X -> X
3018       return Operand.getNode()->getOperand(0);
3019     break;
3020   case ISD::FABS:
3021     if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
3022       return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
3023     break;
3024   }
3025 
3026   SDNode *N;
3027   SDVTList VTs = getVTList(VT);
3028   if (VT != MVT::Glue) { // Don't CSE flag producing nodes
3029     FoldingSetNodeID ID;
3030     SDValue Ops[1] = { Operand };
3031     AddNodeIDNode(ID, Opcode, VTs, Ops);
3032     void *IP = nullptr;
3033     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3034       return SDValue(E, 0);
3035 
3036     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
3037                                         DL.getDebugLoc(), VTs, Operand);
3038     CSEMap.InsertNode(N, IP);
3039   } else {
3040     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
3041                                         DL.getDebugLoc(), VTs, Operand);
3042   }
3043 
3044   InsertNode(N);
3045   return SDValue(N, 0);
3046 }
3047 
3048 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
3049                                              SDNode *Cst1, SDNode *Cst2) {
3050   // If the opcode is a target-specific ISD node, there's nothing we can
3051   // do here and the operand rules may not line up with the below, so
3052   // bail early.
3053   if (Opcode >= ISD::BUILTIN_OP_END)
3054     return SDValue();
3055 
3056   SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
3057   SmallVector<SDValue, 4> Outputs;
3058   EVT SVT = VT.getScalarType();
3059 
3060   ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
3061   ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
3062   if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
3063     return SDValue();
3064 
3065   if (Scalar1 && Scalar2)
3066     // Scalar instruction.
3067     Inputs.push_back(std::make_pair(Scalar1, Scalar2));
3068   else {
3069     // For vectors extract each constant element into Inputs so we can constant
3070     // fold them individually.
3071     BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
3072     BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
3073     if (!BV1 || !BV2)
3074       return SDValue();
3075 
3076     assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3077 
3078     for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3079       ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
3080       ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
3081       if (!V1 || !V2) // Not a constant, bail.
3082         return SDValue();
3083 
3084       if (V1->isOpaque() || V2->isOpaque())
3085         return SDValue();
3086 
3087       // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3088       // FIXME: This is valid and could be handled by truncating the APInts.
3089       if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3090         return SDValue();
3091 
3092       Inputs.push_back(std::make_pair(V1, V2));
3093     }
3094   }
3095 
3096   // We have a number of constant values, constant fold them element by element.
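  // For example, folding (add <2 x i32> <1, 2>, <3, 4>) collects the pairs
  // (1, 3) and (2, 4) above, and the loop below produces the constants 4 and 6.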
3097   for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
3098     const APInt &C1 = Inputs[I].first->getAPIntValue();
3099     const APInt &C2 = Inputs[I].second->getAPIntValue();
3100 
3101     switch (Opcode) {
3102     case ISD::ADD:
3103       Outputs.push_back(getConstant(C1 + C2, SVT));
3104       break;
3105     case ISD::SUB:
3106       Outputs.push_back(getConstant(C1 - C2, SVT));
3107       break;
3108     case ISD::MUL:
3109       Outputs.push_back(getConstant(C1 * C2, SVT));
3110       break;
3111     case ISD::UDIV:
3112       if (!C2.getBoolValue())
3113         return SDValue();
3114       Outputs.push_back(getConstant(C1.udiv(C2), SVT));
3115       break;
3116     case ISD::UREM:
3117       if (!C2.getBoolValue())
3118         return SDValue();
3119       Outputs.push_back(getConstant(C1.urem(C2), SVT));
3120       break;
3121     case ISD::SDIV:
3122       if (!C2.getBoolValue())
3123         return SDValue();
3124       Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
3125       break;
3126     case ISD::SREM:
3127       if (!C2.getBoolValue())
3128         return SDValue();
3129       Outputs.push_back(getConstant(C1.srem(C2), SVT));
3130       break;
3131     case ISD::AND:
3132       Outputs.push_back(getConstant(C1 & C2, SVT));
3133       break;
3134     case ISD::OR:
3135       Outputs.push_back(getConstant(C1 | C2, SVT));
3136       break;
3137     case ISD::XOR:
3138       Outputs.push_back(getConstant(C1 ^ C2, SVT));
3139       break;
3140     case ISD::SHL:
3141       Outputs.push_back(getConstant(C1 << C2, SVT));
3142       break;
3143     case ISD::SRL:
3144       Outputs.push_back(getConstant(C1.lshr(C2), SVT));
3145       break;
3146     case ISD::SRA:
3147       Outputs.push_back(getConstant(C1.ashr(C2), SVT));
3148       break;
3149     case ISD::ROTL:
3150       Outputs.push_back(getConstant(C1.rotl(C2), SVT));
3151       break;
3152     case ISD::ROTR:
3153       Outputs.push_back(getConstant(C1.rotr(C2), SVT));
3154       break;
3155     default:
3156       return SDValue();
3157     }
3158   }
3159 
3160   assert(((Scalar1 && Scalar2) || VT.getVectorNumElements() == Outputs.size()) &&
3161          "Expected a scalar or vector!");
3162 
3163   // Handle the scalar case first.
3164   if (!VT.isVector())
3165     return Outputs.back();
3166 
3167   // We may have a vector type but a scalar result. Create a splat.
3168   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3169 
3170   // Build a big vector out of the scalar elements we generated.
3171   return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
3172 }
3173 
3174 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
3175                               SDValue N2, bool nuw, bool nsw, bool exact) {
3176   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3177   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
3178   switch (Opcode) {
3179   default: break;
3180   case ISD::TokenFactor:
3181     assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3182            N2.getValueType() == MVT::Other && "Invalid token factor!");
3183     // Fold trivial token factors.
3184     if (N1.getOpcode() == ISD::EntryToken) return N2;
3185     if (N2.getOpcode() == ISD::EntryToken) return N1;
3186     if (N1 == N2) return N1;
3187     break;
3188   case ISD::CONCAT_VECTORS:
3189     // Concat of UNDEFs is UNDEF.
3190     if (N1.getOpcode() == ISD::UNDEF &&
3191         N2.getOpcode() == ISD::UNDEF)
3192       return getUNDEF(VT);
3193 
3194     // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be simplified
3195     // to one big BUILD_VECTOR.
3196     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3197         N2.getOpcode() == ISD::BUILD_VECTOR) {
3198       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3199                                     N1.getNode()->op_end());
3200       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3201       return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
3202     }
3203     break;
3204   case ISD::AND:
3205     assert(VT.isInteger() && "This operator does not apply to FP types!");
3206     assert(N1.getValueType() == N2.getValueType() &&
3207            N1.getValueType() == VT && "Binary operator types must match!");
3208     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
3209     // worth handling here.
3210     if (N2C && N2C->isNullValue())
3211       return N2;
3212     if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
3213       return N1;
3214     break;
3215   case ISD::OR:
3216   case ISD::XOR:
3217   case ISD::ADD:
3218   case ISD::SUB:
3219     assert(VT.isInteger() && "This operator does not apply to FP types!");
3220     assert(N1.getValueType() == N2.getValueType() &&
3221            N1.getValueType() == VT && "Binary operator types must match!");
3222     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
3223     // it's worth handling here.
3224     if (N2C && N2C->isNullValue())
3225       return N1;
3226     break;
3227   case ISD::UDIV:
3228   case ISD::UREM:
3229   case ISD::MULHU:
3230   case ISD::MULHS:
3231   case ISD::MUL:
3232   case ISD::SDIV:
3233   case ISD::SREM:
3234     assert(VT.isInteger() && "This operator does not apply to FP types!");
3235     assert(N1.getValueType() == N2.getValueType() &&
3236            N1.getValueType() == VT && "Binary operator types must match!");
3237     break;
3238   case ISD::FADD:
3239   case ISD::FSUB:
3240   case ISD::FMUL:
3241   case ISD::FDIV:
3242   case ISD::FREM:
3243     if (getTarget().Options.UnsafeFPMath) {
3244       if (Opcode == ISD::FADD) {
3245         // 0+x --> x
3246         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
3247           if (CFP->getValueAPF().isZero())
3248             return N2;
3249         // x+0 --> x
3250         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3251           if (CFP->getValueAPF().isZero())
3252             return N1;
3253       } else if (Opcode == ISD::FSUB) {
3254         // x-0 --> x
3255         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3256           if (CFP->getValueAPF().isZero())
3257             return N1;
3258       } else if (Opcode == ISD::FMUL) {
3259         ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
3260         SDValue V = N2;
3261 
3262         // If the first operand isn't the constant, try the second
3263         if (!CFP) {
3264           CFP = dyn_cast<ConstantFPSDNode>(N2);
3265           V = N1;
3266         }
3267 
3268         if (CFP) {
3269           // 0*x --> 0
3270           if (CFP->isZero())
3271             return SDValue(CFP,0);
3272           // 1*x --> x
3273           if (CFP->isExactlyValue(1.0))
3274             return V;
3275         }
3276       }
3277     }
3278     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3279     assert(N1.getValueType() == N2.getValueType() &&
3280            N1.getValueType() == VT && "Binary operator types must match!");
3281     break;
3282   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
3283     assert(N1.getValueType() == VT &&
3284            N1.getValueType().isFloatingPoint() &&
3285            N2.getValueType().isFloatingPoint() &&
3286            "Invalid FCOPYSIGN!");
3287     break;
3288   case ISD::SHL:
3289   case ISD::SRA:
3290   case ISD::SRL:
3291   case ISD::ROTL:
3292   case ISD::ROTR:
3293     assert(VT == N1.getValueType() &&
3294            "Shift operators' return type must be the same as their first arg");
3295     assert(VT.isInteger() && N2.getValueType().isInteger() &&
3296            "Shifts only work on integers");
3297     assert((!VT.isVector() || VT == N2.getValueType()) &&
3298            "Vector shift amounts must have the same type as their first arg");
3299     // Verify that the shift amount VT is big enough to hold valid shift
3300     // amounts.  This catches things like trying to shift an i1024 value by an
3301     // i8, which is easy to fall into in generic code that uses
3302     // TLI.getShiftAmount().
3303     assert(N2.getValueType().getSizeInBits() >=
3304                    Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
3305            "Invalid use of small shift amount with oversized value!");
3306 
3307     // Always fold shifts of i1 values so the code generator doesn't need to
3308     // handle them.  Since we know the size of the shift has to be less than the
3309     // size of the value, the shift/rotate count is guaranteed to be zero.
3310     if (VT == MVT::i1)
3311       return N1;
3312     if (N2C && N2C->isNullValue())
3313       return N1;
3314     break;
3315   case ISD::FP_ROUND_INREG: {
3316     EVT EVT = cast<VTSDNode>(N2)->getVT();
3317     assert(VT == N1.getValueType() && "Not an inreg round!");
3318     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3319            "Cannot FP_ROUND_INREG integer types");
3320     assert(EVT.isVector() == VT.isVector() &&
3321            "FP_ROUND_INREG type should be vector iff the operand "
3322            "type is vector!");
3323     assert((!EVT.isVector() ||
3324             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3325            "Vector element counts must match in FP_ROUND_INREG");
3326     assert(EVT.bitsLE(VT) && "Not rounding down!");
3327     (void)EVT;
3328     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
3329     break;
3330   }
3331   case ISD::FP_ROUND:
3332     assert(VT.isFloatingPoint() &&
3333            N1.getValueType().isFloatingPoint() &&
3334            VT.bitsLE(N1.getValueType()) &&
3335            isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
3336     if (N1.getValueType() == VT) return N1;  // noop conversion.
3337     break;
3338   case ISD::AssertSext:
3339   case ISD::AssertZext: {
3340     EVT EVT = cast<VTSDNode>(N2)->getVT();
3341     assert(VT == N1.getValueType() && "Not an inreg extend!");
3342     assert(VT.isInteger() && EVT.isInteger() &&
3343            "Cannot *_EXTEND_INREG FP types");
3344     assert(!EVT.isVector() &&
3345            "AssertSExt/AssertZExt type should be the vector element type "
3346            "rather than the vector type!");
3347     assert(EVT.bitsLE(VT) && "Not extending!");
3348     if (VT == EVT) return N1; // noop assertion.
3349     break;
3350   }
3351   case ISD::SIGN_EXTEND_INREG: {
3352     EVT EVT = cast<VTSDNode>(N2)->getVT();
3353     assert(VT == N1.getValueType() && "Not an inreg extend!");
3354     assert(VT.isInteger() && EVT.isInteger() &&
3355            "Cannot *_EXTEND_INREG FP types");
3356     assert(EVT.isVector() == VT.isVector() &&
3357            "SIGN_EXTEND_INREG type should be vector iff the operand "
3358            "type is vector!");
3359     assert((!EVT.isVector() ||
3360             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3361            "Vector element counts must match in SIGN_EXTEND_INREG");
3362     assert(EVT.bitsLE(VT) && "Not extending!");
3363     if (EVT == VT) return N1;  // Not actually extending
3364 
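    // For example, sign-extending in-reg from i8: the i32 constant 0x00000080
    // becomes 0xFFFFFF80 (-128) via the shl/ashr sequence below.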
3365     if (N1C) {
3366       APInt Val = N1C->getAPIntValue();
3367       unsigned FromBits = EVT.getScalarType().getSizeInBits();
3368       Val <<= Val.getBitWidth()-FromBits;
3369       Val = Val.ashr(Val.getBitWidth()-FromBits);
3370       return getConstant(Val, VT);
3371     }
3372     break;
3373   }
3374   case ISD::EXTRACT_VECTOR_ELT:
3375     // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3376     if (N1.getOpcode() == ISD::UNDEF)
3377       return getUNDEF(VT);
3378 
3379     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3380     // expanding copies of large vectors from registers.
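    // For example, extracting element 5 from a concatenation of two <4 x i32>
    // vectors becomes an extract of element 1 from the second operand.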
3381     if (N2C &&
3382         N1.getOpcode() == ISD::CONCAT_VECTORS &&
3383         N1.getNumOperands() > 0) {
3384       unsigned Factor =
3385         N1.getOperand(0).getValueType().getVectorNumElements();
3386       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3387                      N1.getOperand(N2C->getZExtValue() / Factor),
3388                      getConstant(N2C->getZExtValue() % Factor,
3389                                  N2.getValueType()));
3390     }
3391 
3392     // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3393     // expanding large vector constants.
3394     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3395       SDValue Elt = N1.getOperand(N2C->getZExtValue());
3396 
3397       if (VT != Elt.getValueType())
3398         // If the vector element type is not legal, the BUILD_VECTOR operands
3399         // are promoted and implicitly truncated, and the result implicitly
3400         // extended. Make that explicit here.
3401         Elt = getAnyExtOrTrunc(Elt, DL, VT);
3402 
3403       return Elt;
3404     }
3405 
3406     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3407     // operations are lowered to scalars.
3408     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3409       // If the indices are the same, return the inserted element else
3410       // if the indices are known different, extract the element from
3411       // the original vector.
3412       SDValue N1Op2 = N1.getOperand(2);
3413       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3414 
3415       if (N1Op2C && N2C) {
3416         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3417           if (VT == N1.getOperand(1).getValueType())
3418             return N1.getOperand(1);
3419           else
3420             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3421         }
3422 
3423         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3424       }
3425     }
3426     break;
3427   case ISD::EXTRACT_ELEMENT:
3428     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3429     assert(!N1.getValueType().isVector() && !VT.isVector() &&
3430            (N1.getValueType().isInteger() == VT.isInteger()) &&
3431            N1.getValueType() != VT &&
3432            "Wrong types for EXTRACT_ELEMENT!");
3433 
3434     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3435     // 64-bit integers into 32-bit parts.  Instead of building the extract of
3436     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3437     if (N1.getOpcode() == ISD::BUILD_PAIR)
3438       return N1.getOperand(N2C->getZExtValue());
3439 
3440     // EXTRACT_ELEMENT of a constant int is also very common.
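    // For example, extracting element 1 of the i64 constant 0x1122334455667788
    // as an i32 yields the constant 0x11223344.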
3441     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3442       unsigned ElementSize = VT.getSizeInBits();
3443       unsigned Shift = ElementSize * N2C->getZExtValue();
3444       APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3445       return getConstant(ShiftedVal.trunc(ElementSize), VT);
3446     }
3447     break;
3448   case ISD::EXTRACT_SUBVECTOR: {
3449     SDValue Index = N2;
3450     if (VT.isSimple() && N1.getValueType().isSimple()) {
3451       assert(VT.isVector() && N1.getValueType().isVector() &&
3452              "Extract subvector VTs must be vectors!");
3453       assert(VT.getVectorElementType() ==
3454              N1.getValueType().getVectorElementType() &&
3455              "Extract subvector VTs must have the same element type!");
3456       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3457              "Extract subvector must be from larger vector to smaller vector!");
3458 
3459       if (isa<ConstantSDNode>(Index.getNode())) {
3460         assert((VT.getVectorNumElements() +
3461                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3462                 <= N1.getValueType().getVectorNumElements())
3463                && "Extract subvector overflow!");
3464       }
3465 
3466       // Trivial extraction.
3467       if (VT.getSimpleVT() == N1.getSimpleValueType())
3468         return N1;
3469     }
3470     break;
3471   }
3472   }
3473 
3474   // Perform trivial constant folding.
3475   if (SDValue SV =
3476           FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode()))
3477     return SV;
3478 
3479   // Canonicalize constant to RHS if commutative.
3480   if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3481     std::swap(N1C, N2C);
3482     std::swap(N1, N2);
3483   }
3484 
3485   // Constant fold FP operations.
3486   bool HasFPExceptions = TLI->hasFloatingPointExceptions();
3487   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3488   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3489   if (N1CFP) {
3490     if (!N2CFP && isCommutativeBinOp(Opcode)) {
3491       // Canonicalize constant to RHS if commutative.
3492       std::swap(N1CFP, N2CFP);
3493       std::swap(N1, N2);
3494     } else if (N2CFP) {
3495       APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3496       APFloat::opStatus s;
3497       switch (Opcode) {
3498       case ISD::FADD:
3499         s = V1.add(V2, APFloat::rmNearestTiesToEven);
3500         if (!HasFPExceptions || s != APFloat::opInvalidOp)
3501           return getConstantFP(V1, VT);
3502         break;
3503       case ISD::FSUB:
3504         s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3505         if (!HasFPExceptions || s!=APFloat::opInvalidOp)
3506           return getConstantFP(V1, VT);
3507         break;
3508       case ISD::FMUL:
3509         s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3510         if (!HasFPExceptions || s!=APFloat::opInvalidOp)
3511           return getConstantFP(V1, VT);
3512         break;
3513       case ISD::FDIV:
3514         s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3515         if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
3516                                  s!=APFloat::opDivByZero)) {
3517           return getConstantFP(V1, VT);
3518         }
3519         break;
3520       case ISD::FREM :
3521         s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3522         if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
3523                                  s!=APFloat::opDivByZero)) {
3524           return getConstantFP(V1, VT);
3525         }
3526         break;
3527       case ISD::FCOPYSIGN:
3528         V1.copySign(V2);
3529         return getConstantFP(V1, VT);
3530       default: break;
3531       }
3532     }
3533 
3534     if (Opcode == ISD::FP_ROUND) {
3535       APFloat V = N1CFP->getValueAPF();    // make copy
3536       bool ignored;
3537       // This can return overflow, underflow, or inexact; we don't care.
3538       // FIXME need to be more flexible about rounding mode.
3539       (void)V.convert(EVTToAPFloatSemantics(VT),
3540                       APFloat::rmNearestTiesToEven, &ignored);
3541       return getConstantFP(V, VT);
3542     }
3543   }
3544 
3545   // Canonicalize an UNDEF to the RHS, even over a constant.
3546   if (N1.getOpcode() == ISD::UNDEF) {
3547     if (isCommutativeBinOp(Opcode)) {
3548       std::swap(N1, N2);
3549     } else {
3550       switch (Opcode) {
3551       case ISD::FP_ROUND_INREG:
3552       case ISD::SIGN_EXTEND_INREG:
3553       case ISD::SUB:
3554       case ISD::FSUB:
3555       case ISD::FDIV:
3556       case ISD::FREM:
3557       case ISD::SRA:
3558         return N1;     // fold op(undef, arg2) -> undef
3559       case ISD::UDIV:
3560       case ISD::SDIV:
3561       case ISD::UREM:
3562       case ISD::SREM:
3563       case ISD::SRL:
3564       case ISD::SHL:
3565         if (!VT.isVector())
3566           return getConstant(0, VT);    // fold op(undef, arg2) -> 0
3567         // For vectors, we can't easily build an all-zero vector, just return
3568         // the other operand.
3569         return N2;
3570       }
3571     }
3572   }
3573 
3574   // Fold a bunch of operators when the RHS is undef.
3575   if (N2.getOpcode() == ISD::UNDEF) {
3576     switch (Opcode) {
3577     case ISD::XOR:
3578       if (N1.getOpcode() == ISD::UNDEF)
3579         // Handle undef ^ undef -> 0 special case. This is a common
3580         // idiom (misuse).
3581         return getConstant(0, VT);
3582       // fallthrough
3583     case ISD::ADD:
3584     case ISD::ADDC:
3585     case ISD::ADDE:
3586     case ISD::SUB:
3587     case ISD::UDIV:
3588     case ISD::SDIV:
3589     case ISD::UREM:
3590     case ISD::SREM:
3591       return N2;       // fold op(arg1, undef) -> undef
3592     case ISD::FADD:
3593     case ISD::FSUB:
3594     case ISD::FMUL:
3595     case ISD::FDIV:
3596     case ISD::FREM:
3597       if (getTarget().Options.UnsafeFPMath)
3598         return N2;
3599       break;
3600     case ISD::MUL:
3601     case ISD::AND:
3602     case ISD::SRL:
3603     case ISD::SHL:
3604       if (!VT.isVector())
3605         return getConstant(0, VT);  // fold op(arg1, undef) -> 0
3606       // For vectors, we can't easily build an all zero vector, just return
3607       // the LHS.
3608       return N1;
3609     case ISD::OR:
3610       if (!VT.isVector())
3611         return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3612       // For vectors, we can't easily build an all one vector, just return
3613       // the LHS.
3614       return N1;
3615     case ISD::SRA:
3616       return N1;
3617     }
3618   }
3619 
3620   // Memoize this node if possible.
3621   BinarySDNode *N;
3622   SDVTList VTs = getVTList(VT);
3623   const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
3624   if (VT != MVT::Glue) {
3625     SDValue Ops[] = {N1, N2};
3626     FoldingSetNodeID ID;
3627     AddNodeIDNode(ID, Opcode, VTs, Ops);
3628     if (BinOpHasFlags)
3629       AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
3630     void *IP = nullptr;
3631     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3632       return SDValue(E, 0);
3633 
3634     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
3635 
3636     CSEMap.InsertNode(N, IP);
3637   } else {
3638     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
3639   }
3640 
3641   InsertNode(N);
3642   return SDValue(N, 0);
3643 }
3644 
3645 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3646                               SDValue N1, SDValue N2, SDValue N3) {
3647   // Perform various simplifications.
3648   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3649   switch (Opcode) {
3650   case ISD::FMA: {
3651     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3652     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3653     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3654     if (N1CFP && N2CFP && N3CFP) {
3655       APFloat  V1 = N1CFP->getValueAPF();
3656       const APFloat &V2 = N2CFP->getValueAPF();
3657       const APFloat &V3 = N3CFP->getValueAPF();
3658       APFloat::opStatus s =
3659         V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3660       if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
3661         return getConstantFP(V1, VT);
3662     }
3663     break;
3664   }
3665   case ISD::CONCAT_VECTORS:
3666     // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be simplified
3667     // to one big BUILD_VECTOR.
3668     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3669         N2.getOpcode() == ISD::BUILD_VECTOR &&
3670         N3.getOpcode() == ISD::BUILD_VECTOR) {
3671       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3672                                     N1.getNode()->op_end());
3673       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3674       Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3675       return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
3676     }
3677     break;
3678   case ISD::SETCC: {
3679     // Use FoldSetCC to simplify SETCC's.
3680     SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3681     if (Simp.getNode()) return Simp;
3682     break;
3683   }
3684   case ISD::SELECT:
3685     if (N1C) {
3686      if (N1C->getZExtValue())
3687        return N2;             // select true, X, Y -> X
3688      return N3;             // select false, X, Y -> Y
3689     }
3690 
3691     if (N2 == N3) return N2;   // select C, X, X -> X
3692     break;
3693   case ISD::VECTOR_SHUFFLE:
3694     llvm_unreachable("should use getVectorShuffle constructor!");
3695   case ISD::INSERT_SUBVECTOR: {
3696     SDValue Index = N3;
3697     if (VT.isSimple() && N1.getValueType().isSimple()
3698         && N2.getValueType().isSimple()) {
3699       assert(VT.isVector() && N1.getValueType().isVector() &&
3700              N2.getValueType().isVector() &&
3701              "Insert subvector VTs must be vectors");
3702       assert(VT == N1.getValueType() &&
3703              "Dest and insert subvector source types must match!");
3704       assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3705              "Insert subvector must be from smaller vector to larger vector!");
3706       if (isa<ConstantSDNode>(Index.getNode())) {
3707         assert((N2.getValueType().getVectorNumElements() +
3708                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3709                 <= VT.getVectorNumElements())
3710                && "Insert subvector overflow!");
3711       }
3712 
3713       // Trivial insertion.
3714       if (VT.getSimpleVT() == N2.getSimpleValueType())
3715         return N2;
3716     }
3717     break;
3718   }
3719   case ISD::BITCAST:
3720     // Fold bit_convert nodes from a type to themselves.
3721     if (N1.getValueType() == VT)
3722       return N1;
3723     break;
3724   }
3725 
3726   // Memoize node if it doesn't produce a flag.
3727   SDNode *N;
3728   SDVTList VTs = getVTList(VT);
3729   if (VT != MVT::Glue) {
3730     SDValue Ops[] = { N1, N2, N3 };
3731     FoldingSetNodeID ID;
3732     AddNodeIDNode(ID, Opcode, VTs, Ops);
3733     void *IP = nullptr;
3734     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3735       return SDValue(E, 0);
3736 
3737     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3738                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3739     CSEMap.InsertNode(N, IP);
3740   } else {
3741     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3742                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3743   }
3744 
3745   InsertNode(N);
3746   return SDValue(N, 0);
3747 }
3748 
3749 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3750                               SDValue N1, SDValue N2, SDValue N3,
3751                               SDValue N4) {
3752   SDValue Ops[] = { N1, N2, N3, N4 };
3753   return getNode(Opcode, DL, VT, Ops);
3754 }
3755 
3756 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3757                               SDValue N1, SDValue N2, SDValue N3,
3758                               SDValue N4, SDValue N5) {
3759   SDValue Ops[] = { N1, N2, N3, N4, N5 };
3760   return getNode(Opcode, DL, VT, Ops);
3761 }
3762 
3763 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3764 /// the incoming stack arguments to be loaded from the stack.
3765 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3766   SmallVector<SDValue, 8> ArgChains;
3767 
3768   // Include the original chain at the beginning of the list. When this is
3769   // used by target LowerCall hooks, this helps legalize find the
3770   // CALLSEQ_BEGIN node.
3771   ArgChains.push_back(Chain);
3772 
3773   // Add a chain value for each stack argument.
3774   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3775        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3776     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3777       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3778         if (FI->getIndex() < 0)
3779           ArgChains.push_back(SDValue(L, 1));
3780 
3781   // Build a tokenfactor for all the chains.
3782   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
3783 }
3784 
3785 /// getMemsetValue - Vectorized representation of the memset value
3786 /// operand.
3787 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3788                               SDLoc dl) {
3789   assert(Value.getOpcode() != ISD::UNDEF);
3790 
3791   unsigned NumBits = VT.getScalarType().getSizeInBits();
3792   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3793     assert(C->getAPIntValue().getBitWidth() == 8);
3794     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3795     if (VT.isInteger())
3796       return DAG.getConstant(Val, VT);
3797     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3798   }
3799 
3800   assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
3801   EVT IntVT = VT.getScalarType();
3802   if (!IntVT.isInteger())
3803     IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
3804 
3805   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
3806   if (NumBits > 8) {
3807     // Use a multiplication with 0x010101... to extend the input to the
3808     // required length.
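    // For example, an i8 fill value of 0xAB extended to i32 becomes
    // 0xAB * 0x01010101 == 0xABABABAB.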
3809     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3810     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
3811                         DAG.getConstant(Magic, IntVT));
3812   }
3813 
3814   if (VT != Value.getValueType() && !VT.isInteger())
3815     Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
3816   if (VT != Value.getValueType()) {
3817     assert(VT.getVectorElementType() == Value.getValueType() &&
3818            "value type should be one vector element here");
3819     SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
3820     Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
3821   }
3822 
3823   return Value;
3824 }
3825 
3826 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
3827 /// when a memcpy is turned into a memset because the source is a constant
3828 /// string pointer.
3829 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3830                                   const TargetLowering &TLI, StringRef Str) {
3831   // Handle vector with all elements zero.
3832   if (Str.empty()) {
3833     if (VT.isInteger())
3834       return DAG.getConstant(0, VT);
3835     else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
3836       return DAG.getConstantFP(0.0, VT);
3837     else if (VT.isVector()) {
3838       unsigned NumElts = VT.getVectorNumElements();
3839       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3840       return DAG.getNode(ISD::BITCAST, dl, VT,
3841                          DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3842                                                              EltVT, NumElts)));
3843     } else
3844       llvm_unreachable("Expected type!");
3845   }
3846 
3847   assert(!VT.isVector() && "Can't handle vector type here!");
3848   unsigned NumVTBits = VT.getSizeInBits();
3849   unsigned NumVTBytes = NumVTBits / 8;
3850   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3851 
3852   APInt Val(NumVTBits, 0);
3853   if (TLI.isLittleEndian()) {
3854     for (unsigned i = 0; i != NumBytes; ++i)
3855       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3856   } else {
3857     for (unsigned i = 0; i != NumBytes; ++i)
3858       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3859   }
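  // For example, on a little-endian target the bytes of "abcd" pack into the
  // i32 constant 0x64636261.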
3860 
3861   // If the "cost" of materializing the integer immediate is less than the cost
3862   // of a load, then it is cost effective to turn the load into the immediate.
3863   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3864   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3865     return DAG.getConstant(Val, VT);
3866   return SDValue(nullptr, 0);
3867 }
3868 
3869 /// getMemBasePlusOffset - Returns a node computing Base plus the constant
3870 /// Offset, using the value type of Base.
3871 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3872                                       SelectionDAG &DAG) {
3873   EVT VT = Base.getValueType();
3874   return DAG.getNode(ISD::ADD, dl,
3875                      VT, Base, DAG.getConstant(Offset, VT));
3876 }
3877 
3878 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3879 ///
3880 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3881   unsigned SrcDelta = 0;
3882   GlobalAddressSDNode *G = nullptr;
3883   if (Src.getOpcode() == ISD::GlobalAddress)
3884     G = cast<GlobalAddressSDNode>(Src);
3885   else if (Src.getOpcode() == ISD::ADD &&
3886            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3887            Src.getOperand(1).getOpcode() == ISD::Constant) {
3888     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3889     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3890   }
3891   if (!G)
3892     return false;
3893 
3894   return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3895 }
3896 
3897 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3898 /// to replace the memset / memcpy. Returns true if the number of memory ops
3899 /// is below the threshold. The sequence of value types to use for the memory
3900 /// ops is returned in MemOps by reference.
3901 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3902                                      unsigned Limit, uint64_t Size,
3903                                      unsigned DstAlign, unsigned SrcAlign,
3904                                      bool IsMemset,
3905                                      bool ZeroMemset,
3906                                      bool MemcpyStrSrc,
3907                                      bool AllowOverlap,
3908                                      SelectionDAG &DAG,
3909                                      const TargetLowering &TLI) {
3910   assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3911          "Expecting memcpy / memset source to meet alignment requirement!");
3912   // If 'SrcAlign' is zero, that means the memory operation does not need to
3913   // load the value, i.e. memset or memcpy from constant string. Otherwise,
3914   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3915   // is the specified alignment of the memory operation. If it is zero, that
3916   // means it's possible to change the alignment of the destination.
3917   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3918   // not need to be loaded.
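  // For example (illustrative): a memset to a stack object whose alignment
  // may still be raised is queried with DstAlign == 0 and SrcAlign == 0, while
  // a memcpy from a non-constant source passes its inferred SrcAlign.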
3919   EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3920                                    IsMemset, ZeroMemset, MemcpyStrSrc,
3921                                    DAG.getMachineFunction());
3922 
3923   if (VT == MVT::Other) {
3924     unsigned AS = 0;
3925     if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3926         TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
3927       VT = TLI.getPointerTy();
3928     } else {
3929       switch (DstAlign & 7) {
3930       case 0:  VT = MVT::i64; break;
3931       case 4:  VT = MVT::i32; break;
3932       case 2:  VT = MVT::i16; break;
3933       default: VT = MVT::i8;  break;
3934       }
3935     }
3936 
3937     MVT LVT = MVT::i64;
3938     while (!TLI.isTypeLegal(LVT))
3939       LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3940     assert(LVT.isInteger());
3941 
3942     if (VT.bitsGT(LVT))
3943       VT = LVT;
3944   }
3945 
3946   unsigned NumMemOps = 0;
3947   while (Size != 0) {
3948     unsigned VTSize = VT.getSizeInBits() / 8;
3949     while (VTSize > Size) {
3950       // For now, only use non-vector loads / stores for the left-over pieces.
3951       EVT NewVT = VT;
3952       unsigned NewVTSize;
3953 
3954       bool Found = false;
3955       if (VT.isVector() || VT.isFloatingPoint()) {
3956         NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3957         if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3958             TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3959           Found = true;
3960         else if (NewVT == MVT::i64 &&
3961                  TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3962                  TLI.isSafeMemOpType(MVT::f64)) {
3963           // i64 is usually not legal on 32-bit targets, but f64 may be.
3964           NewVT = MVT::f64;
3965           Found = true;
3966         }
3967       }
3968 
3969       if (!Found) {
3970         do {
3971           NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3972           if (NewVT == MVT::i8)
3973             break;
3974         } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3975       }
3976       NewVTSize = NewVT.getSizeInBits() / 8;
3977 
3978       // If the new VT cannot cover all of the remaining bits, then consider
3979       // issuing a (or a pair of) unaligned and overlapping load / store.
3980       // FIXME: Only does this for 64-bit or more since we don't have proper
3981       // cost model for unaligned load / store.
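      // For example (illustrative): with 7 bytes left after an 8-byte store,
      // prefer one more unaligned 8-byte store that overlaps the previous one
      // by a byte over a 4+2+1-byte tail, when such accesses are fast.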
3982       bool Fast;
3983       unsigned AS = 0;
3984       if (NumMemOps && AllowOverlap &&
3985           VTSize >= 8 && NewVTSize < Size &&
3986           TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
3987         VTSize = Size;
3988       else {
3989         VT = NewVT;
3990         VTSize = NewVTSize;
3991       }
3992     }
3993 
3994     if (++NumMemOps > Limit)
3995       return false;
3996 
3997     MemOps.push_back(VT);
3998     Size -= VTSize;
3999   }
4000 
4001   return true;
4002 }
4003 
4004 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
4005                                        SDValue Chain, SDValue Dst,
4006                                        SDValue Src, uint64_t Size,
4007                                        unsigned Align, bool isVol,
4008                                        bool AlwaysInline,
4009                                        MachinePointerInfo DstPtrInfo,
4010                                        MachinePointerInfo SrcPtrInfo) {
4011   // Turn a memcpy of undef to nop.
4012   if (Src.getOpcode() == ISD::UNDEF)
4013     return Chain;
4014 
4015   // Expand memcpy to a series of load and store ops if the size operand falls
4016   // below a certain threshold.
4017   // TODO: In the AlwaysInline case, if the size is big then generate a loop
4018   // rather than maybe a humongous number of loads and stores.
4019   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4020   std::vector<EVT> MemOps;
4021   bool DstAlignCanChange = false;
4022   MachineFunction &MF = DAG.getMachineFunction();
4023   MachineFrameInfo *MFI = MF.getFrameInfo();
4024   bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
4025   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4026   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4027     DstAlignCanChange = true;
4028   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4029   if (Align > SrcAlign)
4030     SrcAlign = Align;
4031   StringRef Str;
4032   bool CopyFromStr = isMemSrcFromString(Src, Str);
4033   bool isZeroStr = CopyFromStr && Str.empty();
4034   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
4035 
4036   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4037                                 (DstAlignCanChange ? 0 : Align),
4038                                 (isZeroStr ? 0 : SrcAlign),
4039                                 false, false, CopyFromStr, true, DAG, TLI))
4040     return SDValue();
4041 
4042   if (DstAlignCanChange) {
4043     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4044     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
4045 
4046     // Don't promote to an alignment that would require dynamic stack
4047     // realignment.
4048     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
4049     if (!TRI->needsStackRealignment(MF))
4050        while (NewAlign > Align &&
4051              TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
4052           NewAlign /= 2;
4053 
4054     if (NewAlign > Align) {
4055       // Give the stack frame object a larger alignment if needed.
4056       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4057         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4058       Align = NewAlign;
4059     }
4060   }
4061 
4062   SmallVector<SDValue, 8> OutChains;
4063   unsigned NumMemOps = MemOps.size();
4064   uint64_t SrcOff = 0, DstOff = 0;
4065   for (unsigned i = 0; i != NumMemOps; ++i) {
4066     EVT VT = MemOps[i];
4067     unsigned VTSize = VT.getSizeInBits() / 8;
4068     SDValue Value, Store;
4069 
4070     if (VTSize > Size) {
4071       // Issuing an unaligned load / store pair that overlaps with the previous
4072       // pair. Adjust the offset accordingly.
4073       assert(i == NumMemOps-1 && i != 0);
4074       SrcOff -= VTSize - Size;
4075       DstOff -= VTSize - Size;
4076     }
4077 
4078     if (CopyFromStr &&
4079         (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
4080       // It's unlikely a store of a vector immediate can be done in a single
4081       // instruction. It would require a load from a constantpool first.
4082       // We only handle zero vectors here.
4083       // FIXME: Handle other cases where store of vector immediate is done in
4084       // a single instruction.
4085       Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
4086       if (Value.getNode())
4087         Store = DAG.getStore(Chain, dl, Value,
4088                              getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4089                              DstPtrInfo.getWithOffset(DstOff), isVol,
4090                              false, Align);
4091     }
4092 
4093     if (!Store.getNode()) {
4094       // The type might not be legal for the target.  This should only happen
4095       // if the type is smaller than a legal type, as on PPC, so the right
4096       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
4097       // to Load/Store if NVT==VT.
4098       // FIXME does the case above also need this?
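      // For example (illustrative): an i8 tail piece on such a target becomes
      // an any-extending i8 load into the legal type followed by a truncating
      // i8 store; when NVT == VT these simplify to a plain load/store pair.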
4099       EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
4100       assert(NVT.bitsGE(VT));
4101       Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
4102                              getMemBasePlusOffset(Src, SrcOff, dl, DAG),
4103                              SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
4104                              false, MinAlign(SrcAlign, SrcOff));
4105       Store = DAG.getTruncStore(Chain, dl, Value,
4106                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4107                                 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
4108                                 false, Align);
4109     }
4110     OutChains.push_back(Store);
4111     SrcOff += VTSize;
4112     DstOff += VTSize;
4113     Size -= VTSize;
4114   }
4115 
4116   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4117 }
4118 
4119 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
4120                                         SDValue Chain, SDValue Dst,
4121                                         SDValue Src, uint64_t Size,
4122                                         unsigned Align,  bool isVol,
4123                                         bool AlwaysInline,
4124                                         MachinePointerInfo DstPtrInfo,
4125                                         MachinePointerInfo SrcPtrInfo) {
4126   // Turn a memmove of undef to nop.
4127   if (Src.getOpcode() == ISD::UNDEF)
4128     return Chain;
4129 
4130   // Expand memmove to a series of load and store ops if the size operand falls
4131   // below a certain threshold.
4132   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4133   std::vector<EVT> MemOps;
4134   bool DstAlignCanChange = false;
4135   MachineFunction &MF = DAG.getMachineFunction();
4136   MachineFrameInfo *MFI = MF.getFrameInfo();
4137   bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
4138   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4139   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4140     DstAlignCanChange = true;
4141   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4142   if (Align > SrcAlign)
4143     SrcAlign = Align;
4144   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
4145 
4146   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4147                                 (DstAlignCanChange ? 0 : Align), SrcAlign,
4148                                 false, false, false, false, DAG, TLI))
4149     return SDValue();
4150 
4151   if (DstAlignCanChange) {
4152     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4153     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
4154     if (NewAlign > Align) {
4155       // Give the stack frame object a larger alignment if needed.
4156       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4157         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4158       Align = NewAlign;
4159     }
4160   }
4161 
4162   uint64_t SrcOff = 0, DstOff = 0;
4163   SmallVector<SDValue, 8> LoadValues;
4164   SmallVector<SDValue, 8> LoadChains;
4165   SmallVector<SDValue, 8> OutChains;
4166   unsigned NumMemOps = MemOps.size();
4167   for (unsigned i = 0; i < NumMemOps; i++) {
4168     EVT VT = MemOps[i];
4169     unsigned VTSize = VT.getSizeInBits() / 8;
4170     SDValue Value;
4171 
4172     Value = DAG.getLoad(VT, dl, Chain,
4173                         getMemBasePlusOffset(Src, SrcOff, dl, DAG),
4174                         SrcPtrInfo.getWithOffset(SrcOff), isVol,
4175                         false, false, SrcAlign);
4176     LoadValues.push_back(Value);
4177     LoadChains.push_back(Value.getValue(1));
4178     SrcOff += VTSize;
4179   }
4180   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
4181   OutChains.clear();
4182   for (unsigned i = 0; i < NumMemOps; i++) {
4183     EVT VT = MemOps[i];
4184     unsigned VTSize = VT.getSizeInBits() / 8;
4185     SDValue Store;
4186 
4187     Store = DAG.getStore(Chain, dl, LoadValues[i],
4188                          getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4189                          DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
4190     OutChains.push_back(Store);
4191     DstOff += VTSize;
4192   }
4193 
4194   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4195 }
4196 
4197 /// \brief Lower the call to 'memset' intrinsic function into a series of store
4198 /// operations.
4199 ///
4200 /// \param DAG Selection DAG where lowered code is placed.
4201 /// \param dl Link to corresponding IR location.
4202 /// \param Chain Control flow dependency.
4203 /// \param Dst Pointer to destination memory location.
4204 /// \param Src Value of byte to write into the memory.
4205 /// \param Size Number of bytes to write.
4206 /// \param Align Alignment of the destination in bytes.
4207 /// \param isVol True if destination is volatile.
4208 /// \param DstPtrInfo IR information on the memory pointer.
4209 /// \returns New head in the control flow, if lowering was successful, empty
4210 /// SDValue otherwise.
4211 ///
4212 /// The function tries to replace 'llvm.memset' intrinsic with several store
4213 /// operations and value calculation code. This is usually profitable for small
4214 /// memory size.
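/// For example (illustrative): an 8-byte memset of the byte 0xAB may become a
/// single i64 store of 0xABABABABABABABAB, with any smaller trailing pieces
/// either truncated from that value or re-materialized at the narrower type.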
4215 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
4216                                SDValue Chain, SDValue Dst,
4217                                SDValue Src, uint64_t Size,
4218                                unsigned Align, bool isVol,
4219                                MachinePointerInfo DstPtrInfo) {
4220   // Turn a memset of undef to nop.
4221   if (Src.getOpcode() == ISD::UNDEF)
4222     return Chain;
4223 
4224   // Expand memset to a series of load/store ops if the size operand
4225   // falls below a certain threshold.
4226   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4227   std::vector<EVT> MemOps;
4228   bool DstAlignCanChange = false;
4229   MachineFunction &MF = DAG.getMachineFunction();
4230   MachineFrameInfo *MFI = MF.getFrameInfo();
4231   bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
4232   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4233   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4234     DstAlignCanChange = true;
4235   bool IsZeroVal =
4236     isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4237   if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4238                                 Size, (DstAlignCanChange ? 0 : Align), 0,
4239                                 true, IsZeroVal, false, true, DAG, TLI))
4240     return SDValue();
4241 
4242   if (DstAlignCanChange) {
4243     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4244     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
4245     if (NewAlign > Align) {
4246       // Give the stack frame object a larger alignment if needed.
4247       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4248         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4249       Align = NewAlign;
4250     }
4251   }
4252 
4253   SmallVector<SDValue, 8> OutChains;
4254   uint64_t DstOff = 0;
4255   unsigned NumMemOps = MemOps.size();
4256 
4257   // Find the largest store and generate the bit pattern for it.
4258   EVT LargestVT = MemOps[0];
4259   for (unsigned i = 1; i < NumMemOps; i++)
4260     if (MemOps[i].bitsGT(LargestVT))
4261       LargestVT = MemOps[i];
4262   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4263 
4264   for (unsigned i = 0; i < NumMemOps; i++) {
4265     EVT VT = MemOps[i];
4266     unsigned VTSize = VT.getSizeInBits() / 8;
4267     if (VTSize > Size) {
4268       // Issuing an unaligned store that overlaps with the previous store.
4269       // Adjust the offset accordingly.
4270       assert(i == NumMemOps-1 && i != 0);
4271       DstOff -= VTSize - Size;
4272     }
4273 
4274     // If this store is smaller than the largest store, see whether we can get
4275     // the smaller value for free with a truncate.
4276     SDValue Value = MemSetValue;
4277     if (VT.bitsLT(LargestVT)) {
4278       if (!LargestVT.isVector() && !VT.isVector() &&
4279           TLI.isTruncateFree(LargestVT, VT))
4280         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4281       else
4282         Value = getMemsetValue(Src, VT, DAG, dl);
4283     }
4284     assert(Value.getValueType() == VT && "Value with wrong type.");
4285     SDValue Store = DAG.getStore(Chain, dl, Value,
4286                                  getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4287                                  DstPtrInfo.getWithOffset(DstOff),
4288                                  isVol, false, Align);
4289     OutChains.push_back(Store);
4290     DstOff += VT.getSizeInBits() / 8;
4291     Size -= VTSize;
4292   }
4293 
4294   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4295 }
4296 
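/// A minimal usage sketch for getMemcpy (illustrative only; Chain, dl, Dst,
/// Src, DAG and TLI are hypothetical names assumed to be in scope, and the
/// 32-byte size is arbitrary):
/// \code
///   SDValue Copy = DAG.getMemcpy(Chain, dl, Dst, Src,
///                                DAG.getConstant(32, TLI.getPointerTy()),
///                                /*Align=*/4, /*isVol=*/false,
///                                /*AlwaysInline=*/false, /*isTailCall=*/false,
///                                MachinePointerInfo(), MachinePointerInfo());
/// \endcode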
4297 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
4298                                 SDValue Src, SDValue Size,
4299                                 unsigned Align, bool isVol, bool AlwaysInline,
4300                                 bool isTailCall, MachinePointerInfo DstPtrInfo,
4301                                 MachinePointerInfo SrcPtrInfo) {
4302   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4303 
4304   // Check to see if we should lower the memcpy to loads and stores first.
4305   // For cases within the target-specified limits, this is the best choice.
4306   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4307   if (ConstantSize) {
4308     // Memcpy with size zero? Just return the original chain.
4309     if (ConstantSize->isNullValue())
4310       return Chain;
4311 
4312     SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4313                                              ConstantSize->getZExtValue(),Align,
4314                                 isVol, false, DstPtrInfo, SrcPtrInfo);
4315     if (Result.getNode())
4316       return Result;
4317   }
4318 
4319   // Then check to see if we should lower the memcpy with target-specific
4320   // code. If the target chooses to do this, this is the next best.
4321   if (TSI) {
4322     SDValue Result = TSI->EmitTargetCodeForMemcpy(
4323         *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
4324         DstPtrInfo, SrcPtrInfo);
4325     if (Result.getNode())
4326       return Result;
4327   }
4328 
4329   // If we really need inline code and the target declined to provide it,
4330   // use a (potentially long) sequence of loads and stores.
4331   if (AlwaysInline) {
4332     assert(ConstantSize && "AlwaysInline requires a constant size!");
4333     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4334                                    ConstantSize->getZExtValue(), Align, isVol,
4335                                    true, DstPtrInfo, SrcPtrInfo);
4336   }
4337 
4338   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4339   // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4340   // respect volatile, so they may do things like read or write memory
4341   // beyond the given memory regions. But fixing this isn't easy, and most
4342   // people don't care.
4343 
4344   // Emit a library call.
4345   TargetLowering::ArgListTy Args;
4346   TargetLowering::ArgListEntry Entry;
4347   Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4348   Entry.Node = Dst; Args.push_back(Entry);
4349   Entry.Node = Src; Args.push_back(Entry);
4350   Entry.Node = Size; Args.push_back(Entry);
4351   // FIXME: pass in SDLoc
4352   TargetLowering::CallLoweringInfo CLI(*this);
4353   CLI.setDebugLoc(dl).setChain(Chain)
4354     .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4355                Type::getVoidTy(*getContext()),
4356                getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4357                                  TLI->getPointerTy()), std::move(Args), 0)
4358     .setDiscardResult()
4359     .setTailCall(isTailCall);
4360 
4361   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4362   return CallResult.second;
4363 }
4364 
4365 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4366                                  SDValue Src, SDValue Size,
4367                                  unsigned Align, bool isVol, bool isTailCall,
4368                                  MachinePointerInfo DstPtrInfo,
4369                                  MachinePointerInfo SrcPtrInfo) {
4370   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4371 
4372   // Check to see if we should lower the memmove to loads and stores first.
4373   // For cases within the target-specified limits, this is the best choice.
4374   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4375   if (ConstantSize) {
4376     // Memmove with size zero? Just return the original chain.
4377     if (ConstantSize->isNullValue())
4378       return Chain;
4379 
4380     SDValue Result =
4381       getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4382                                ConstantSize->getZExtValue(), Align, isVol,
4383                                false, DstPtrInfo, SrcPtrInfo);
4384     if (Result.getNode())
4385       return Result;
4386   }
4387 
4388   // Then check to see if we should lower the memmove with target-specific
4389   // code. If the target chooses to do this, this is the next best.
4390   if (TSI) {
4391     SDValue Result = TSI->EmitTargetCodeForMemmove(
4392         *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
4393     if (Result.getNode())
4394       return Result;
4395   }
4396 
4397   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4398   // not be safe.  See memcpy above for more details.
4399 
4400   // Emit a library call.
4401   TargetLowering::ArgListTy Args;
4402   TargetLowering::ArgListEntry Entry;
4403   Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4404   Entry.Node = Dst; Args.push_back(Entry);
4405   Entry.Node = Src; Args.push_back(Entry);
4406   Entry.Node = Size; Args.push_back(Entry);
4407   // FIXME:  pass in SDLoc
4408   TargetLowering::CallLoweringInfo CLI(*this);
4409   CLI.setDebugLoc(dl).setChain(Chain)
4410     .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4411                Type::getVoidTy(*getContext()),
4412                getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4413                                  TLI->getPointerTy()), std::move(Args), 0)
4414     .setDiscardResult()
4415     .setTailCall(isTailCall);
4416 
4417   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4418   return CallResult.second;
4419 }
4420 
4421 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4422                                 SDValue Src, SDValue Size,
4423                                 unsigned Align, bool isVol, bool isTailCall,
4424                                 MachinePointerInfo DstPtrInfo) {
4425   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4426 
4427   // Check to see if we should lower the memset to stores first.
4428   // For cases within the target-specified limits, this is the best choice.
4429   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4430   if (ConstantSize) {
4431     // Memset with size zero? Just return the original chain.
4432     if (ConstantSize->isNullValue())
4433       return Chain;
4434 
4435     SDValue Result =
4436       getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4437                       Align, isVol, DstPtrInfo);
4438 
4439     if (Result.getNode())
4440       return Result;
4441   }
4442 
4443   // Then check to see if we should lower the memset with target-specific
4444   // code. If the target chooses to do this, this is the next best.
4445   if (TSI) {
4446     SDValue Result = TSI->EmitTargetCodeForMemset(
4447         *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
4448     if (Result.getNode())
4449       return Result;
4450   }
4451 
4452   // Emit a library call.
4453   Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4454   TargetLowering::ArgListTy Args;
4455   TargetLowering::ArgListEntry Entry;
4456   Entry.Node = Dst; Entry.Ty = IntPtrTy;
4457   Args.push_back(Entry);
4458   Entry.Node = Src;
4459   Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
4460   Args.push_back(Entry);
4461   Entry.Node = Size;
4462   Entry.Ty = IntPtrTy;
4463   Args.push_back(Entry);
4464 
4465   // FIXME: pass in SDLoc
4466   TargetLowering::CallLoweringInfo CLI(*this);
4467   CLI.setDebugLoc(dl).setChain(Chain)
4468     .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
4469                Type::getVoidTy(*getContext()),
4470                getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4471                                  TLI->getPointerTy()), std::move(Args), 0)
4472     .setDiscardResult()
4473     .setTailCall(isTailCall);
4474 
4475   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4476   return CallResult.second;
4477 }
4478 
4479 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4480                                 SDVTList VTList, ArrayRef<SDValue> Ops,
4481                                 MachineMemOperand *MMO,
4482                                 AtomicOrdering SuccessOrdering,
4483                                 AtomicOrdering FailureOrdering,
4484                                 SynchronizationScope SynchScope) {
4485   FoldingSetNodeID ID;
4486   ID.AddInteger(MemVT.getRawBits());
4487   AddNodeIDNode(ID, Opcode, VTList, Ops);
4488   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4489   void* IP = nullptr;
4490   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4491     cast<AtomicSDNode>(E)->refineAlignment(MMO);
4492     return SDValue(E, 0);
4493   }
4494 
4495   // Allocate the operands array for the node out of the BumpPtrAllocator, since
4496   // SDNode doesn't have access to it.  This memory will be "leaked" when
4497   // the node is deallocated, but recovered when the allocator is released.
4498   // If the number of operands is less than 5 we use AtomicSDNode's internal
4499   // storage.
4500   unsigned NumOps = Ops.size();
4501   SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
4502                              : nullptr;
4503 
4504   SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4505                                                dl.getDebugLoc(), VTList, MemVT,
4506                                                Ops.data(), DynOps, NumOps, MMO,
4507                                                SuccessOrdering, FailureOrdering,
4508                                                SynchScope);
4509   CSEMap.InsertNode(N, IP);
4510   InsertNode(N);
4511   return SDValue(N, 0);
4512 }
4513 
4514 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4515                                 SDVTList VTList, ArrayRef<SDValue> Ops,
4516                                 MachineMemOperand *MMO,
4517                                 AtomicOrdering Ordering,
4518                                 SynchronizationScope SynchScope) {
4519   return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
4520                    Ordering, SynchScope);
4521 }
4522 
4523 SDValue SelectionDAG::getAtomicCmpSwap(
4524     unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
4525     SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
4526     unsigned Alignment, AtomicOrdering SuccessOrdering,
4527     AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
4528   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4529          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4530   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4531 
4532   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4533     Alignment = getEVTAlignment(MemVT);
4534 
4535   MachineFunction &MF = getMachineFunction();
4536 
4537   // FIXME: Volatile isn't really correct; we should keep track of atomic
4538   // orderings in the memoperand.
4539   unsigned Flags = MachineMemOperand::MOVolatile;
4540   Flags |= MachineMemOperand::MOLoad;
4541   Flags |= MachineMemOperand::MOStore;
4542 
4543   MachineMemOperand *MMO =
4544     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4545 
4546   return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
4547                           SuccessOrdering, FailureOrdering, SynchScope);
4548 }
4549 
4550 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
4551                                        SDVTList VTs, SDValue Chain, SDValue Ptr,
4552                                        SDValue Cmp, SDValue Swp,
4553                                        MachineMemOperand *MMO,
4554                                        AtomicOrdering SuccessOrdering,
4555                                        AtomicOrdering FailureOrdering,
4556                                        SynchronizationScope SynchScope) {
4557   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4558          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4559   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4560 
4561   SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4562   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
4563                    SuccessOrdering, FailureOrdering, SynchScope);
4564 }
4565 
4566 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4567                                 SDValue Chain,
4568                                 SDValue Ptr, SDValue Val,
4569                                 const Value* PtrVal,
4570                                 unsigned Alignment,
4571                                 AtomicOrdering Ordering,
4572                                 SynchronizationScope SynchScope) {
4573   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4574     Alignment = getEVTAlignment(MemVT);
4575 
4576   MachineFunction &MF = getMachineFunction();
4577   // An atomic store does not load. An atomic load does not store.
4578   // (An atomicrmw obviously both loads and stores.)
4579   // For now, atomics are considered to be volatile always, and they are
4580   // chained as such.
4581   // FIXME: Volatile isn't really correct; we should keep track of atomic
4582   // orderings in the memoperand.
4583   unsigned Flags = MachineMemOperand::MOVolatile;
4584   if (Opcode != ISD::ATOMIC_STORE)
4585     Flags |= MachineMemOperand::MOLoad;
4586   if (Opcode != ISD::ATOMIC_LOAD)
4587     Flags |= MachineMemOperand::MOStore;
4588 
4589   MachineMemOperand *MMO =
4590     MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4591                             MemVT.getStoreSize(), Alignment);
4592 
4593   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4594                    Ordering, SynchScope);
4595 }
4596 
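/// A minimal usage sketch for the read-modify-write form below (illustrative
/// only; dl, Chain, Ptr, Val and MMO are assumed to be provided by the
/// caller):
/// \code
///   SDValue Old = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, MVT::i32,
///                               Chain, Ptr, Val, MMO,
///                               SequentiallyConsistent, CrossThread);
/// \endcode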
4597 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4598                                 SDValue Chain,
4599                                 SDValue Ptr, SDValue Val,
4600                                 MachineMemOperand *MMO,
4601                                 AtomicOrdering Ordering,
4602                                 SynchronizationScope SynchScope) {
4603   assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4604           Opcode == ISD::ATOMIC_LOAD_SUB ||
4605           Opcode == ISD::ATOMIC_LOAD_AND ||
4606           Opcode == ISD::ATOMIC_LOAD_OR ||
4607           Opcode == ISD::ATOMIC_LOAD_XOR ||
4608           Opcode == ISD::ATOMIC_LOAD_NAND ||
4609           Opcode == ISD::ATOMIC_LOAD_MIN ||
4610           Opcode == ISD::ATOMIC_LOAD_MAX ||
4611           Opcode == ISD::ATOMIC_LOAD_UMIN ||
4612           Opcode == ISD::ATOMIC_LOAD_UMAX ||
4613           Opcode == ISD::ATOMIC_SWAP ||
4614           Opcode == ISD::ATOMIC_STORE) &&
4615          "Invalid Atomic Op");
4616 
4617   EVT VT = Val.getValueType();
4618 
4619   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4620                                                getVTList(VT, MVT::Other);
4621   SDValue Ops[] = {Chain, Ptr, Val};
4622   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
4623 }
4624 
4625 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4626                                 EVT VT, SDValue Chain,
4627                                 SDValue Ptr,
4628                                 MachineMemOperand *MMO,
4629                                 AtomicOrdering Ordering,
4630                                 SynchronizationScope SynchScope) {
4631   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4632 
4633   SDVTList VTs = getVTList(VT, MVT::Other);
4634   SDValue Ops[] = {Chain, Ptr};
4635   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
4636 }
4637 
4638 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
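/// For example (illustrative), getMergeValues({Lo, Hi}, dl) yields a node
/// whose result 0 is Lo and result 1 is Hi; a single operand is returned
/// unchanged.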
4639 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
4640   if (Ops.size() == 1)
4641     return Ops[0];
4642 
4643   SmallVector<EVT, 4> VTs;
4644   VTs.reserve(Ops.size());
4645   for (unsigned i = 0; i < Ops.size(); ++i)
4646     VTs.push_back(Ops[i].getValueType());
4647   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
4648 }
4649 
4650 SDValue
4651 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4652                                   ArrayRef<SDValue> Ops,
4653                                   EVT MemVT, MachinePointerInfo PtrInfo,
4654                                   unsigned Align, bool Vol,
4655                                   bool ReadMem, bool WriteMem, unsigned Size) {
4656   if (Align == 0)  // Ensure that codegen never sees alignment 0
4657     Align = getEVTAlignment(MemVT);
4658 
4659   MachineFunction &MF = getMachineFunction();
4660   unsigned Flags = 0;
4661   if (WriteMem)
4662     Flags |= MachineMemOperand::MOStore;
4663   if (ReadMem)
4664     Flags |= MachineMemOperand::MOLoad;
4665   if (Vol)
4666     Flags |= MachineMemOperand::MOVolatile;
4667   if (!Size)
4668     Size = MemVT.getStoreSize();
4669   MachineMemOperand *MMO =
4670     MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
4671 
4672   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
4673 }
4674 
4675 SDValue
4676 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4677                                   ArrayRef<SDValue> Ops, EVT MemVT,
4678                                   MachineMemOperand *MMO) {
4679   assert((Opcode == ISD::INTRINSIC_VOID ||
4680           Opcode == ISD::INTRINSIC_W_CHAIN ||
4681           Opcode == ISD::PREFETCH ||
4682           Opcode == ISD::LIFETIME_START ||
4683           Opcode == ISD::LIFETIME_END ||
4684           (Opcode <= INT_MAX &&
4685            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4686          "Opcode is not a memory-accessing opcode!");
4687 
4688   // Memoize the node unless it returns a flag.
4689   MemIntrinsicSDNode *N;
4690   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4691     FoldingSetNodeID ID;
4692     AddNodeIDNode(ID, Opcode, VTList, Ops);
4693     ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4694     void *IP = nullptr;
4695     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4696       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4697       return SDValue(E, 0);
4698     }
4699 
4700     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4701                                                dl.getDebugLoc(), VTList, Ops,
4702                                                MemVT, MMO);
4703     CSEMap.InsertNode(N, IP);
4704   } else {
4705     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4706                                                dl.getDebugLoc(), VTList, Ops,
4707                                                MemVT, MMO);
4708   }
4709   InsertNode(N);
4710   return SDValue(N, 0);
4711 }
4712 
4713 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4714 /// MachinePointerInfo record from it.  This is particularly useful because the
4715 /// code generator has many cases where it doesn't bother passing in a
4716 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
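/// For example (illustrative), a pointer of the form (add (FrameIndex 3), 8)
/// is modeled as MachinePointerInfo::getFixedStack(3, 8).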
4717 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4718   // If this is FI+Offset, we can model it.
4719   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4720     return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4721 
4722   // If this is (FI+Offset1)+Offset2, we can model it.
4723   if (Ptr.getOpcode() != ISD::ADD ||
4724       !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4725       !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4726     return MachinePointerInfo();
4727 
4728   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4729   return MachinePointerInfo::getFixedStack(FI, Offset+
4730                        cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4731 }
4732 
4733 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4734 /// MachinePointerInfo record from it.  This is particularly useful because the
4735 /// code generator has many cases where it doesn't bother passing in a
4736 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4737 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4738   // If the 'Offset' value isn't a constant, we can't handle this.
4739   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4740     return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4741   if (OffsetOp.getOpcode() == ISD::UNDEF)
4742     return InferPointerInfo(Ptr);
4743   return MachinePointerInfo();
4744 }
4745 
4746 
4747 SDValue
4748 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4749                       EVT VT, SDLoc dl, SDValue Chain,
4750                       SDValue Ptr, SDValue Offset,
4751                       MachinePointerInfo PtrInfo, EVT MemVT,
4752                       bool isVolatile, bool isNonTemporal, bool isInvariant,
4753                       unsigned Alignment, const AAMDNodes &AAInfo,
4754                       const MDNode *Ranges) {
4755   assert(Chain.getValueType() == MVT::Other &&
4756         "Invalid chain type");
4757   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4758     Alignment = getEVTAlignment(VT);
4759 
4760   unsigned Flags = MachineMemOperand::MOLoad;
4761   if (isVolatile)
4762     Flags |= MachineMemOperand::MOVolatile;
4763   if (isNonTemporal)
4764     Flags |= MachineMemOperand::MONonTemporal;
4765   if (isInvariant)
4766     Flags |= MachineMemOperand::MOInvariant;
4767 
4768   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4769   // clients.
4770   if (PtrInfo.V.isNull())
4771     PtrInfo = InferPointerInfo(Ptr, Offset);
4772 
4773   MachineFunction &MF = getMachineFunction();
4774   MachineMemOperand *MMO =
4775     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4776                             AAInfo, Ranges);
4777   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4778 }
4779 
4780 SDValue
4781 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4782                       EVT VT, SDLoc dl, SDValue Chain,
4783                       SDValue Ptr, SDValue Offset, EVT MemVT,
4784                       MachineMemOperand *MMO) {
4785   if (VT == MemVT) {
4786     ExtType = ISD::NON_EXTLOAD;
4787   } else if (ExtType == ISD::NON_EXTLOAD) {
4788     assert(VT == MemVT && "Non-extending load from different memory type!");
4789   } else {
4790     // Extending load.
4791     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4792            "Should only be an extending load, not truncating!");
4793     assert(VT.isInteger() == MemVT.isInteger() &&
4794            "Cannot convert from FP to Int or Int -> FP!");
4795     assert(VT.isVector() == MemVT.isVector() &&
4796            "Cannot use an ext load to convert to or from a vector!");
4797     assert((!VT.isVector() ||
4798             VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4799            "Cannot use an ext load to change the number of vector elements!");
4800   }
4801 
4802   bool Indexed = AM != ISD::UNINDEXED;
4803   assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4804          "Unindexed load with an offset!");
4805 
4806   SDVTList VTs = Indexed ?
4807     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4808   SDValue Ops[] = { Chain, Ptr, Offset };
4809   FoldingSetNodeID ID;
4810   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
4811   ID.AddInteger(MemVT.getRawBits());
4812   ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4813                                      MMO->isNonTemporal(),
4814                                      MMO->isInvariant()));
4815   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4816   void *IP = nullptr;
4817   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4818     cast<LoadSDNode>(E)->refineAlignment(MMO);
4819     return SDValue(E, 0);
4820   }
4821   SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4822                                              dl.getDebugLoc(), VTs, AM, ExtType,
4823                                              MemVT, MMO);
4824   CSEMap.InsertNode(N, IP);
4825   InsertNode(N);
4826   return SDValue(N, 0);
4827 }
4828 
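/// A minimal usage sketch for the simple form below (illustrative only; FI is
/// a hypothetical frame index, dl/Chain/TLI are assumed to be in scope, and
/// the trailing AAInfo/Ranges parameters are assumed to take their declared
/// defaults):
/// \code
///   SDValue L = DAG.getLoad(MVT::i32, dl, Chain,
///                           DAG.getFrameIndex(FI, TLI.getPointerTy()),
///                           MachinePointerInfo::getFixedStack(FI),
///                           /*isVolatile=*/false, /*isNonTemporal=*/false,
///                           /*isInvariant=*/false, /*Alignment=*/4);
/// \endcode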
4829 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4830                               SDValue Chain, SDValue Ptr,
4831                               MachinePointerInfo PtrInfo,
4832                               bool isVolatile, bool isNonTemporal,
4833                               bool isInvariant, unsigned Alignment,
4834                               const AAMDNodes &AAInfo,
4835                               const MDNode *Ranges) {
4836   SDValue Undef = getUNDEF(Ptr.getValueType());
4837   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4838                  PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4839                  AAInfo, Ranges);
4840 }
4841 
4842 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4843                               SDValue Chain, SDValue Ptr,
4844                               MachineMemOperand *MMO) {
4845   SDValue Undef = getUNDEF(Ptr.getValueType());
4846   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4847                  VT, MMO);
4848 }
4849 
4850 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4851                                  SDValue Chain, SDValue Ptr,
4852                                  MachinePointerInfo PtrInfo, EVT MemVT,
4853                                  bool isVolatile, bool isNonTemporal,
4854                                  bool isInvariant, unsigned Alignment,
4855                                  const AAMDNodes &AAInfo) {
4856   SDValue Undef = getUNDEF(Ptr.getValueType());
4857   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4858                  PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
4859                  Alignment, AAInfo);
4860 }
4861 
4862 
4863 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4864                                  SDValue Chain, SDValue Ptr, EVT MemVT,
4865                                  MachineMemOperand *MMO) {
4866   SDValue Undef = getUNDEF(Ptr.getValueType());
4867   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4868                  MemVT, MMO);
4869 }
4870 
4871 SDValue
4872 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4873                              SDValue Offset, ISD::MemIndexedMode AM) {
4874   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4875   assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4876          "Load is already an indexed load!");
4877   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4878                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
4879                  LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4880                  false, LD->getAlignment());
4881 }
4882 
4883 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4884                                SDValue Ptr, MachinePointerInfo PtrInfo,
4885                                bool isVolatile, bool isNonTemporal,
4886                                unsigned Alignment, const AAMDNodes &AAInfo) {
4887   assert(Chain.getValueType() == MVT::Other &&
4888         "Invalid chain type");
4889   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4890     Alignment = getEVTAlignment(Val.getValueType());
4891 
4892   unsigned Flags = MachineMemOperand::MOStore;
4893   if (isVolatile)
4894     Flags |= MachineMemOperand::MOVolatile;
4895   if (isNonTemporal)
4896     Flags |= MachineMemOperand::MONonTemporal;
4897 
4898   if (PtrInfo.V.isNull())
4899     PtrInfo = InferPointerInfo(Ptr);
4900 
4901   MachineFunction &MF = getMachineFunction();
4902   MachineMemOperand *MMO =
4903     MF.getMachineMemOperand(PtrInfo, Flags,
4904                             Val.getValueType().getStoreSize(), Alignment,
4905                             AAInfo);
4906 
4907   return getStore(Chain, dl, Val, Ptr, MMO);
4908 }
4909 
4910 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4911                                SDValue Ptr, MachineMemOperand *MMO) {
4912   assert(Chain.getValueType() == MVT::Other &&
4913         "Invalid chain type");
4914   EVT VT = Val.getValueType();
4915   SDVTList VTs = getVTList(MVT::Other);
4916   SDValue Undef = getUNDEF(Ptr.getValueType());
4917   SDValue Ops[] = { Chain, Val, Ptr, Undef };
4918   FoldingSetNodeID ID;
4919   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
4920   ID.AddInteger(VT.getRawBits());
4921   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4922                                      MMO->isNonTemporal(), MMO->isInvariant()));
4923   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4924   void *IP = nullptr;
4925   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4926     cast<StoreSDNode>(E)->refineAlignment(MMO);
4927     return SDValue(E, 0);
4928   }
4929   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4930                                               dl.getDebugLoc(), VTs,
4931                                               ISD::UNINDEXED, false, VT, MMO);
4932   CSEMap.InsertNode(N, IP);
4933   InsertNode(N);
4934   return SDValue(N, 0);
4935 }
4936 
4937 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4938                                     SDValue Ptr, MachinePointerInfo PtrInfo,
4939                                     EVT SVT,bool isVolatile, bool isNonTemporal,
4940                                     unsigned Alignment,
4941                                     const AAMDNodes &AAInfo) {
4942   assert(Chain.getValueType() == MVT::Other &&
4943         "Invalid chain type");
4944   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4945     Alignment = getEVTAlignment(SVT);
4946 
4947   unsigned Flags = MachineMemOperand::MOStore;
4948   if (isVolatile)
4949     Flags |= MachineMemOperand::MOVolatile;
4950   if (isNonTemporal)
4951     Flags |= MachineMemOperand::MONonTemporal;
4952 
4953   if (PtrInfo.V.isNull())
4954     PtrInfo = InferPointerInfo(Ptr);
4955 
4956   MachineFunction &MF = getMachineFunction();
4957   MachineMemOperand *MMO =
4958     MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4959                             AAInfo);
4960 
4961   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4962 }
4963 
4964 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4965                                     SDValue Ptr, EVT SVT,
4966                                     MachineMemOperand *MMO) {
4967   EVT VT = Val.getValueType();
4968 
4969   assert(Chain.getValueType() == MVT::Other &&
4970         "Invalid chain type");
4971   if (VT == SVT)
4972     return getStore(Chain, dl, Val, Ptr, MMO);
4973 
4974   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4975          "Should only be a truncating store, not extending!");
4976   assert(VT.isInteger() == SVT.isInteger() &&
4977          "Can't do FP-INT conversion!");
4978   assert(VT.isVector() == SVT.isVector() &&
4979          "Cannot use trunc store to convert to or from a vector!");
4980   assert((!VT.isVector() ||
4981           VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4982          "Cannot use trunc store to change the number of vector elements!");
4983 
4984   SDVTList VTs = getVTList(MVT::Other);
4985   SDValue Undef = getUNDEF(Ptr.getValueType());
4986   SDValue Ops[] = { Chain, Val, Ptr, Undef };
4987   FoldingSetNodeID ID;
4988   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
4989   ID.AddInteger(SVT.getRawBits());
4990   ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4991                                      MMO->isNonTemporal(), MMO->isInvariant()));
4992   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4993   void *IP = nullptr;
4994   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4995     cast<StoreSDNode>(E)->refineAlignment(MMO);
4996     return SDValue(E, 0);
4997   }
4998   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4999                                               dl.getDebugLoc(), VTs,
5000                                               ISD::UNINDEXED, true, SVT, MMO);
5001   CSEMap.InsertNode(N, IP);
5002   InsertNode(N);
5003   return SDValue(N, 0);
5004 }
5005 
5006 SDValue
5007 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
5008                               SDValue Offset, ISD::MemIndexedMode AM) {
5009   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
5010   assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
5011          "Store is already an indexed store!");
5012   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
5013   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
5014   FoldingSetNodeID ID;
5015   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5016   ID.AddInteger(ST->getMemoryVT().getRawBits());
5017   ID.AddInteger(ST->getRawSubclassData());
5018   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
5019   void *IP = nullptr;
5020   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5021     return SDValue(E, 0);
5022 
5023   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
5024                                               dl.getDebugLoc(), VTs, AM,
5025                                               ST->isTruncatingStore(),
5026                                               ST->getMemoryVT(),
5027                                               ST->getMemOperand());
5028   CSEMap.InsertNode(N, IP);
5029   InsertNode(N);
5030   return SDValue(N, 0);
5031 }
5032 
5033 SDValue
5034 SelectionDAG::getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain,
5035                             SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT,
5036                             MachineMemOperand *MMO, ISD::LoadExtType ExtTy) {
5037 
5038   SDVTList VTs = getVTList(VT, MVT::Other);
5039   SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
5040   FoldingSetNodeID ID;
5041   AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
5042   ID.AddInteger(VT.getRawBits());
5043   ID.AddInteger(encodeMemSDNodeFlags(ExtTy, ISD::UNINDEXED,
5044                                      MMO->isVolatile(),
5045                                      MMO->isNonTemporal(),
5046                                      MMO->isInvariant()));
5047   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5048   void *IP = nullptr;
5049   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5050     cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
5051     return SDValue(E, 0);
5052   }
5053   SDNode *N = new (NodeAllocator) MaskedLoadSDNode(dl.getIROrder(),
5054                                              dl.getDebugLoc(), Ops, 4, VTs,
5055                                              ExtTy, MemVT, MMO);
5056   CSEMap.InsertNode(N, IP);
5057   InsertNode(N);
5058   return SDValue(N, 0);
5059 }
5060 
5061 SDValue SelectionDAG::getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
5062                                      SDValue Ptr, SDValue Mask, EVT MemVT,
5063                                      MachineMemOperand *MMO, bool isTrunc) {
5064   assert(Chain.getValueType() == MVT::Other &&
5065         "Invalid chain type");
5066   EVT VT = Val.getValueType();
5067   SDVTList VTs = getVTList(MVT::Other);
5068   SDValue Ops[] = { Chain, Ptr, Mask, Val };
5069   FoldingSetNodeID ID;
5070   AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
5071   ID.AddInteger(VT.getRawBits());
5072   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
5073                                      MMO->isNonTemporal(), MMO->isInvariant()));
5074   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5075   void *IP = nullptr;
5076   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5077     cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
5078     return SDValue(E, 0);
5079   }
5080   SDNode *N = new (NodeAllocator) MaskedStoreSDNode(dl.getIROrder(),
5081                                                     dl.getDebugLoc(), Ops, 4,
5082                                                     VTs, isTrunc, MemVT, MMO);
5083   CSEMap.InsertNode(N, IP);
5084   InsertNode(N);
5085   return SDValue(N, 0);
5086 }
5087 
5088 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
5089                                SDValue Chain, SDValue Ptr,
5090                                SDValue SV,
5091                                unsigned Align) {
5092   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
5093   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
5094 }
5095 
5096 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
5097                               ArrayRef<SDUse> Ops) {
5098   switch (Ops.size()) {
5099   case 0: return getNode(Opcode, DL, VT);
5100   case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
5101   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
5102   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5103   default: break;
5104   }
5105 
5106   // Copy from an SDUse array into an SDValue array for use with
5107   // the regular getNode logic.
5108   SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
5109   return getNode(Opcode, DL, VT, NewOps);
5110 }
5111 
5112 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
5113                               ArrayRef<SDValue> Ops) {
5114   unsigned NumOps = Ops.size();
5115   switch (NumOps) {
5116   case 0: return getNode(Opcode, DL, VT);
5117   case 1: return getNode(Opcode, DL, VT, Ops[0]);
5118   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
5119   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5120   default: break;
5121   }
5122 
5123   switch (Opcode) {
5124   default: break;
5125   case ISD::SELECT_CC: {
5126     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
5127     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
5128            "LHS and RHS of condition must have same type!");
5129     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5130            "True and False arms of SelectCC must have same type!");
5131     assert(Ops[2].getValueType() == VT &&
5132            "select_cc node must be of same type as true and false value!");
5133     break;
5134   }
5135   case ISD::BR_CC: {
5136     assert(NumOps == 5 && "BR_CC takes 5 operands!");
5137     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5138            "LHS/RHS of comparison should match types!");
5139     break;
5140   }
5141   }
5142 
5143   // Memoize nodes.
5144   SDNode *N;
5145   SDVTList VTs = getVTList(VT);
5146 
5147   if (VT != MVT::Glue) {
5148     FoldingSetNodeID ID;
5149     AddNodeIDNode(ID, Opcode, VTs, Ops);
5150     void *IP = nullptr;
5151 
5152     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5153       return SDValue(E, 0);
5154 
5155     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5156                                    VTs, Ops);
5157     CSEMap.InsertNode(N, IP);
5158   } else {
5159     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5160                                    VTs, Ops);
5161   }
5162 
5163   InsertNode(N);
5164   return SDValue(N, 0);
5165 }
5166 
5167 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
5168                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
5169   return getNode(Opcode, DL, getVTList(ResultTys), Ops);
5170 }
5171 
5172 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5173                               ArrayRef<SDValue> Ops) {
5174   if (VTList.NumVTs == 1)
5175     return getNode(Opcode, DL, VTList.VTs[0], Ops);
5176 
5177 #if 0
5178   switch (Opcode) {
5179   // FIXME: figure out how to safely handle things like
5180   // int foo(int x) { return 1 << (x & 255); }
5181   // int bar() { return foo(256); }
5182   case ISD::SRA_PARTS:
5183   case ISD::SRL_PARTS:
5184   case ISD::SHL_PARTS:
5185     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5186         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5187       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5188     else if (N3.getOpcode() == ISD::AND)
5189       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5190         // If the and is only masking out bits that cannot affect the shift,
5191         // eliminate the and.
5192         unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
5193         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5194           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5195       }
5196     break;
5197   }
5198 #endif
5199 
5200   // Memoize the node unless it returns a flag.
5201   SDNode *N;
5202   unsigned NumOps = Ops.size();
5203   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5204     FoldingSetNodeID ID;
5205     AddNodeIDNode(ID, Opcode, VTList, Ops);
5206     void *IP = nullptr;
5207     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5208       return SDValue(E, 0);
5209 
5210     if (NumOps == 1) {
5211       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5212                                           DL.getDebugLoc(), VTList, Ops[0]);
5213     } else if (NumOps == 2) {
5214       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5215                                            DL.getDebugLoc(), VTList, Ops[0],
5216                                            Ops[1]);
5217     } else if (NumOps == 3) {
5218       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5219                                             DL.getDebugLoc(), VTList, Ops[0],
5220                                             Ops[1], Ops[2]);
5221     } else {
5222       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5223                                      VTList, Ops);
5224     }
5225     CSEMap.InsertNode(N, IP);
5226   } else {
5227     if (NumOps == 1) {
5228       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5229                                           DL.getDebugLoc(), VTList, Ops[0]);
5230     } else if (NumOps == 2) {
5231       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5232                                            DL.getDebugLoc(), VTList, Ops[0],
5233                                            Ops[1]);
5234     } else if (NumOps == 3) {
5235       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5236                                             DL.getDebugLoc(), VTList, Ops[0],
5237                                             Ops[1], Ops[2]);
5238     } else {
5239       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5240                                      VTList, Ops);
5241     }
5242   }
5243   InsertNode(N);
5244   return SDValue(N, 0);
5245 }
5246 
5247 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
5248   return getNode(Opcode, DL, VTList, None);
5249 }
5250 
5251 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5252                               SDValue N1) {
5253   SDValue Ops[] = { N1 };
5254   return getNode(Opcode, DL, VTList, Ops);
5255 }
5256 
5257 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5258                               SDValue N1, SDValue N2) {
5259   SDValue Ops[] = { N1, N2 };
5260   return getNode(Opcode, DL, VTList, Ops);
5261 }
5262 
5263 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5264                               SDValue N1, SDValue N2, SDValue N3) {
5265   SDValue Ops[] = { N1, N2, N3 };
5266   return getNode(Opcode, DL, VTList, Ops);
5267 }
5268 
5269 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5270                               SDValue N1, SDValue N2, SDValue N3,
5271                               SDValue N4) {
5272   SDValue Ops[] = { N1, N2, N3, N4 };
5273   return getNode(Opcode, DL, VTList, Ops);
5274 }
5275 
5276 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5277                               SDValue N1, SDValue N2, SDValue N3,
5278                               SDValue N4, SDValue N5) {
5279   SDValue Ops[] = { N1, N2, N3, N4, N5 };
5280   return getNode(Opcode, DL, VTList, Ops);
5281 }
5282 
5283 SDVTList SelectionDAG::getVTList(EVT VT) {
5284   return makeVTList(SDNode::getValueTypeList(VT), 1);
5285 }
5286 
5287 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5288   FoldingSetNodeID ID;
5289   ID.AddInteger(2U);
5290   ID.AddInteger(VT1.getRawBits());
5291   ID.AddInteger(VT2.getRawBits());
5292 
5293   void *IP = nullptr;
5294   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5295   if (!Result) {
5296     EVT *Array = Allocator.Allocate<EVT>(2);
5297     Array[0] = VT1;
5298     Array[1] = VT2;
5299     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5300     VTListMap.InsertNode(Result, IP);
5301   }
5302   return Result->getSDVTList();
5303 }
5304 
5305 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5306   FoldingSetNodeID ID;
5307   ID.AddInteger(3U);
5308   ID.AddInteger(VT1.getRawBits());
5309   ID.AddInteger(VT2.getRawBits());
5310   ID.AddInteger(VT3.getRawBits());
5311 
5312   void *IP = nullptr;
5313   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5314   if (!Result) {
5315     EVT *Array = Allocator.Allocate<EVT>(3);
5316     Array[0] = VT1;
5317     Array[1] = VT2;
5318     Array[2] = VT3;
5319     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5320     VTListMap.InsertNode(Result, IP);
5321   }
5322   return Result->getSDVTList();
5323 }
5324 
5325 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5326   FoldingSetNodeID ID;
5327   ID.AddInteger(4U);
5328   ID.AddInteger(VT1.getRawBits());
5329   ID.AddInteger(VT2.getRawBits());
5330   ID.AddInteger(VT3.getRawBits());
5331   ID.AddInteger(VT4.getRawBits());
5332 
5333   void *IP = nullptr;
5334   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5335   if (!Result) {
5336     EVT *Array = Allocator.Allocate<EVT>(4);
5337     Array[0] = VT1;
5338     Array[1] = VT2;
5339     Array[2] = VT3;
5340     Array[3] = VT4;
5341     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5342     VTListMap.InsertNode(Result, IP);
5343   }
5344   return Result->getSDVTList();
5345 }
5346 
5347 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5348   unsigned NumVTs = VTs.size();
5349   FoldingSetNodeID ID;
5350   ID.AddInteger(NumVTs);
5351   for (unsigned index = 0; index < NumVTs; index++) {
5352     ID.AddInteger(VTs[index].getRawBits());
5353   }
5354 
5355   void *IP = nullptr;
5356   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5357   if (!Result) {
5358     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5359     std::copy(VTs.begin(), VTs.end(), Array);
5360     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5361     VTListMap.InsertNode(Result, IP);
5362   }
5363   return Result->getSDVTList();
5364 }
5365 
5366 
5367 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5368 /// specified operands.  If the resultant node already exists in the DAG,
5369 /// this does not modify the specified node, instead it returns the node that
5370 /// already exists.  If the resultant node does not exist in the DAG, the
5371 /// input node is returned.  As a degenerate case, if you specify the same
5372 /// input operands as the node already has, the input node is returned.
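//
// A minimal caller-side sketch (placeholder names, not part of this file):
// the caller must be ready for CSE to hand back a different, pre-existing
// node instead of the one it passed in.
//
//   SDNode *Result = DAG.UpdateNodeOperands(N, NewOp0, NewOp1);
//   if (Result != N) {
//     // An equivalent node already existed; N was left untouched.
//   }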
5373 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5374   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5375 
5376   // Check to see if there is no change.
5377   if (Op == N->getOperand(0)) return N;
5378 
5379   // See if the modified node already exists.
5380   void *InsertPos = nullptr;
5381   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5382     return Existing;
5383 
5384   // Nope it doesn't.  Remove the node from its current place in the maps.
5385   if (InsertPos)
5386     if (!RemoveNodeFromCSEMaps(N))
5387       InsertPos = nullptr;
5388 
5389   // Now we update the operands.
5390   N->OperandList[0].set(Op);
5391 
5392   // If this gets put into a CSE map, add it.
5393   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5394   return N;
5395 }
5396 
5397 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5398   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5399 
5400   // Check to see if there is no change.
5401   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5402     return N;   // No operands changed, just return the input node.
5403 
5404   // See if the modified node already exists.
5405   void *InsertPos = nullptr;
5406   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5407     return Existing;
5408 
5409   // Nope it doesn't.  Remove the node from its current place in the maps.
5410   if (InsertPos)
5411     if (!RemoveNodeFromCSEMaps(N))
5412       InsertPos = nullptr;
5413 
5414   // Now we update the operands.
5415   if (N->OperandList[0] != Op1)
5416     N->OperandList[0].set(Op1);
5417   if (N->OperandList[1] != Op2)
5418     N->OperandList[1].set(Op2);
5419 
5420   // If this gets put into a CSE map, add it.
5421   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5422   return N;
5423 }
5424 
5425 SDNode *SelectionDAG::
5426 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5427   SDValue Ops[] = { Op1, Op2, Op3 };
5428   return UpdateNodeOperands(N, Ops);
5429 }
5430 
5431 SDNode *SelectionDAG::
5432 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5433                    SDValue Op3, SDValue Op4) {
5434   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5435   return UpdateNodeOperands(N, Ops);
5436 }
5437 
5438 SDNode *SelectionDAG::
5439 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5440                    SDValue Op3, SDValue Op4, SDValue Op5) {
5441   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5442   return UpdateNodeOperands(N, Ops);
5443 }
5444 
5445 SDNode *SelectionDAG::
5446 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
5447   unsigned NumOps = Ops.size();
5448   assert(N->getNumOperands() == NumOps &&
5449          "Update with wrong number of operands");
5450 
5451   // If no operands changed just return the input node.
5452   if (Ops.empty() || std::equal(Ops.begin(), Ops.end(), N->op_begin()))
5453     return N;
5454 
5455   // See if the modified node already exists.
5456   void *InsertPos = nullptr;
5457   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
5458     return Existing;
5459 
5460   // Nope it doesn't.  Remove the node from its current place in the maps.
5461   if (InsertPos)
5462     if (!RemoveNodeFromCSEMaps(N))
5463       InsertPos = nullptr;
5464 
5465   // Now we update the operands.
5466   for (unsigned i = 0; i != NumOps; ++i)
5467     if (N->OperandList[i] != Ops[i])
5468       N->OperandList[i].set(Ops[i]);
5469 
5470   // If this gets put into a CSE map, add it.
5471   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5472   return N;
5473 }
5474 
5475 /// DropOperands - Release the operands and set this node to have
5476 /// zero operands.
5477 void SDNode::DropOperands() {
5478   // Unlike the code in MorphNodeTo that does this, we don't need to
5479   // watch for dead nodes here.
5480   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5481     SDUse &Use = *I++;
5482     Use.set(SDValue());
5483   }
5484 }
5485 
5486 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5487 /// machine opcode.
5488 ///
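//
// Usage sketch (assumed caller-side code with placeholder names): an
// instruction selector typically lowers an existing node in place to a
// target machine opcode, e.g.
//
//   SDNode *New = CurDAG->SelectNodeTo(N, SomeMachineOpc, N->getValueType(0),
//                                      N->getOperand(0), N->getOperand(1));
//
// Since this forwards to MorphNodeTo, the result may be a pre-existing
// identical node rather than N itself.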
5489 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5490                                    EVT VT) {
5491   SDVTList VTs = getVTList(VT);
5492   return SelectNodeTo(N, MachineOpc, VTs, None);
5493 }
5494 
5495 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5496                                    EVT VT, SDValue Op1) {
5497   SDVTList VTs = getVTList(VT);
5498   SDValue Ops[] = { Op1 };
5499   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5500 }
5501 
5502 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5503                                    EVT VT, SDValue Op1,
5504                                    SDValue Op2) {
5505   SDVTList VTs = getVTList(VT);
5506   SDValue Ops[] = { Op1, Op2 };
5507   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5508 }
5509 
5510 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5511                                    EVT VT, SDValue Op1,
5512                                    SDValue Op2, SDValue Op3) {
5513   SDVTList VTs = getVTList(VT);
5514   SDValue Ops[] = { Op1, Op2, Op3 };
5515   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5516 }
5517 
5518 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5519                                    EVT VT, ArrayRef<SDValue> Ops) {
5520   SDVTList VTs = getVTList(VT);
5521   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5522 }
5523 
5524 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5525                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
5526   SDVTList VTs = getVTList(VT1, VT2);
5527   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5528 }
5529 
5530 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5531                                    EVT VT1, EVT VT2) {
5532   SDVTList VTs = getVTList(VT1, VT2);
5533   return SelectNodeTo(N, MachineOpc, VTs, None);
5534 }
5535 
5536 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5537                                    EVT VT1, EVT VT2, EVT VT3,
5538                                    ArrayRef<SDValue> Ops) {
5539   SDVTList VTs = getVTList(VT1, VT2, VT3);
5540   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5541 }
5542 
5543 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5544                                    EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5545                                    ArrayRef<SDValue> Ops) {
5546   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5547   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5548 }
5549 
5550 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5551                                    EVT VT1, EVT VT2,
5552                                    SDValue Op1) {
5553   SDVTList VTs = getVTList(VT1, VT2);
5554   SDValue Ops[] = { Op1 };
5555   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5556 }
5557 
5558 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5559                                    EVT VT1, EVT VT2,
5560                                    SDValue Op1, SDValue Op2) {
5561   SDVTList VTs = getVTList(VT1, VT2);
5562   SDValue Ops[] = { Op1, Op2 };
5563   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5564 }
5565 
5566 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5567                                    EVT VT1, EVT VT2,
5568                                    SDValue Op1, SDValue Op2,
5569                                    SDValue Op3) {
5570   SDVTList VTs = getVTList(VT1, VT2);
5571   SDValue Ops[] = { Op1, Op2, Op3 };
5572   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5573 }
5574 
5575 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5576                                    EVT VT1, EVT VT2, EVT VT3,
5577                                    SDValue Op1, SDValue Op2,
5578                                    SDValue Op3) {
5579   SDVTList VTs = getVTList(VT1, VT2, VT3);
5580   SDValue Ops[] = { Op1, Op2, Op3 };
5581   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5582 }
5583 
5584 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5585                                    SDVTList VTs,ArrayRef<SDValue> Ops) {
5586   N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
5587   // Reset the NodeID to -1.
5588   N->setNodeId(-1);
5589   return N;
5590 }
5591 
5592 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5593 /// the line number information on the merged node, since it is not possible
5594 /// to preserve the information that the operation is associated with multiple
5595 /// lines. This makes the debugger work better at -O0, where there is a higher
5596 /// probability of having other instructions associated with that line.
5597 ///
5598 /// For IROrder, we keep the smaller of the two.
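//
// Worked example (illustration only): merging a node whose DebugLoc points at
// line 12 into an existing node whose DebugLoc points at line 10 at -O0 clears
// the DebugLoc, because no single line describes the merged operation, and the
// smaller of the two IROrder values is kept.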
5599 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5600   DebugLoc NLoc = N->getDebugLoc();
5601   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
5602     N->setDebugLoc(DebugLoc());
5603   }
5604   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5605   N->setIROrder(Order);
5606   return N;
5607 }
5608 
5609 /// MorphNodeTo - This *mutates* the specified node to have the specified
5610 /// return type, opcode, and operands.
5611 ///
5612 /// Note that MorphNodeTo returns the resultant node.  If there is already a
5613 /// node of the specified opcode and operands, it returns that node instead of
5614 /// the current one.  Note that the SDLoc need not be the same.
5615 ///
5616 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5617 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5618 /// node, and because it doesn't require CSE recalculation for any of
5619 /// the node's users.
5620 ///
5621 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
5622 /// As a consequence it isn't appropriate to use from within the DAG combiner or
5623 /// the legalizer which maintain worklists that would need to be updated when
5624 /// deleting things.
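//
// Caller-side sketch (placeholder names, not part of this file):
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
//   SDValue Ops[] = { Op0, Op1 };
//   SDNode *Morphed = DAG.MorphNodeTo(N, NewOpc, VTs, Ops);
//   if (Morphed != N) {
//     // An identical node already existed; it is returned and N itself was
//     // not rewritten.
//   }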
5625 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5626                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
5627   unsigned NumOps = Ops.size();
5628   // If an identical node already exists, use it.
5629   void *IP = nullptr;
5630   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5631     FoldingSetNodeID ID;
5632     AddNodeIDNode(ID, Opc, VTs, Ops);
5633     if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5634       return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5635   }
5636 
5637   if (!RemoveNodeFromCSEMaps(N))
5638     IP = nullptr;
5639 
5640   // Start the morphing.
5641   N->NodeType = Opc;
5642   N->ValueList = VTs.VTs;
5643   N->NumValues = VTs.NumVTs;
5644 
5645   // Clear the operands list, updating used nodes to remove this from their
5646   // use list.  Keep track of any operands that become dead as a result.
5647   SmallPtrSet<SDNode*, 16> DeadNodeSet;
5648   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5649     SDUse &Use = *I++;
5650     SDNode *Used = Use.getNode();
5651     Use.set(SDValue());
5652     if (Used->use_empty())
5653       DeadNodeSet.insert(Used);
5654   }
5655 
5656   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5657     // Initialize the memory references information.
5658     MN->setMemRefs(nullptr, nullptr);
5659     // If NumOps is larger than the # of operands we can have in a
5660     // MachineSDNode, reallocate the operand list.
5661     if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5662       if (MN->OperandsNeedDelete)
5663         delete[] MN->OperandList;
5664       if (NumOps > array_lengthof(MN->LocalOperands))
5665         // We're creating a final node that will live unmorphed for the
5666         // remainder of the current SelectionDAG iteration, so we can allocate
5667         // the operands directly out of a pool with no recycling metadata.
5668         MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5669                          Ops.data(), NumOps);
5670       else
5671         MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
5672       MN->OperandsNeedDelete = false;
5673     } else
5674       MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
5675   } else {
5676     // If NumOps is larger than the # of operands we currently have, reallocate
5677     // the operand list.
5678     if (NumOps > N->NumOperands) {
5679       if (N->OperandsNeedDelete)
5680         delete[] N->OperandList;
5681       N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
5682       N->OperandsNeedDelete = true;
5683     } else
5684       N->InitOperands(N->OperandList, Ops.data(), NumOps);
5685   }
5686 
5687   // Delete any nodes that are still dead after adding the uses for the
5688   // new operands.
5689   if (!DeadNodeSet.empty()) {
5690     SmallVector<SDNode *, 16> DeadNodes;
5691     for (SDNode *N : DeadNodeSet)
5692       if (N->use_empty())
5693         DeadNodes.push_back(N);
5694     RemoveDeadNodes(DeadNodes);
5695   }
5696 
5697   if (IP)
5698     CSEMap.InsertNode(N, IP);   // Memoize the new node.
5699   return N;
5700 }
5701 
5702 
5703 /// getMachineNode - These are used for target selectors to create a new node
5704 /// with specified return type(s), MachineInstr opcode, and operands.
5705 ///
5706 /// Note that getMachineNode returns the resultant node.  If there is already a
5707 /// node of the specified opcode and operands, it returns that node instead of
5708 /// the current one.
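//
// Usage sketch (placeholder names, not part of this file): a target selector
// creating a two-result machine node and unpacking its results might write
//
//   MachineSDNode *MN = CurDAG->getMachineNode(SomeTargetOpcode, DL,
//                                              MVT::i32, MVT::Other,
//                                              Addr, Chain);
//   SDValue Result(MN, 0);   // the i32 value
//   SDValue OutChain(MN, 1); // the chain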
5709 MachineSDNode *
5710 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5711   SDVTList VTs = getVTList(VT);
5712   return getMachineNode(Opcode, dl, VTs, None);
5713 }
5714 
5715 MachineSDNode *
5716 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5717   SDVTList VTs = getVTList(VT);
5718   SDValue Ops[] = { Op1 };
5719   return getMachineNode(Opcode, dl, VTs, Ops);
5720 }
5721 
5722 MachineSDNode *
5723 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5724                              SDValue Op1, SDValue Op2) {
5725   SDVTList VTs = getVTList(VT);
5726   SDValue Ops[] = { Op1, Op2 };
5727   return getMachineNode(Opcode, dl, VTs, Ops);
5728 }
5729 
5730 MachineSDNode *
5731 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5732                              SDValue Op1, SDValue Op2, SDValue Op3) {
5733   SDVTList VTs = getVTList(VT);
5734   SDValue Ops[] = { Op1, Op2, Op3 };
5735   return getMachineNode(Opcode, dl, VTs, Ops);
5736 }
5737 
5738 MachineSDNode *
5739 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5740                              ArrayRef<SDValue> Ops) {
5741   SDVTList VTs = getVTList(VT);
5742   return getMachineNode(Opcode, dl, VTs, Ops);
5743 }
5744 
5745 MachineSDNode *
5746 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5747   SDVTList VTs = getVTList(VT1, VT2);
5748   return getMachineNode(Opcode, dl, VTs, None);
5749 }
5750 
5751 MachineSDNode *
5752 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5753                              EVT VT1, EVT VT2, SDValue Op1) {
5754   SDVTList VTs = getVTList(VT1, VT2);
5755   SDValue Ops[] = { Op1 };
5756   return getMachineNode(Opcode, dl, VTs, Ops);
5757 }
5758 
5759 MachineSDNode *
5760 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5761                              EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5762   SDVTList VTs = getVTList(VT1, VT2);
5763   SDValue Ops[] = { Op1, Op2 };
5764   return getMachineNode(Opcode, dl, VTs, Ops);
5765 }
5766 
5767 MachineSDNode *
5768 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5769                              EVT VT1, EVT VT2, SDValue Op1,
5770                              SDValue Op2, SDValue Op3) {
5771   SDVTList VTs = getVTList(VT1, VT2);
5772   SDValue Ops[] = { Op1, Op2, Op3 };
5773   return getMachineNode(Opcode, dl, VTs, Ops);
5774 }
5775 
5776 MachineSDNode *
5777 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5778                              EVT VT1, EVT VT2,
5779                              ArrayRef<SDValue> Ops) {
5780   SDVTList VTs = getVTList(VT1, VT2);
5781   return getMachineNode(Opcode, dl, VTs, Ops);
5782 }
5783 
5784 MachineSDNode *
5785 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5786                              EVT VT1, EVT VT2, EVT VT3,
5787                              SDValue Op1, SDValue Op2) {
5788   SDVTList VTs = getVTList(VT1, VT2, VT3);
5789   SDValue Ops[] = { Op1, Op2 };
5790   return getMachineNode(Opcode, dl, VTs, Ops);
5791 }
5792 
5793 MachineSDNode *
5794 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5795                              EVT VT1, EVT VT2, EVT VT3,
5796                              SDValue Op1, SDValue Op2, SDValue Op3) {
5797   SDVTList VTs = getVTList(VT1, VT2, VT3);
5798   SDValue Ops[] = { Op1, Op2, Op3 };
5799   return getMachineNode(Opcode, dl, VTs, Ops);
5800 }
5801 
5802 MachineSDNode *
5803 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5804                              EVT VT1, EVT VT2, EVT VT3,
5805                              ArrayRef<SDValue> Ops) {
5806   SDVTList VTs = getVTList(VT1, VT2, VT3);
5807   return getMachineNode(Opcode, dl, VTs, Ops);
5808 }
5809 
5810 MachineSDNode *
5811 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5812                              EVT VT2, EVT VT3, EVT VT4,
5813                              ArrayRef<SDValue> Ops) {
5814   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5815   return getMachineNode(Opcode, dl, VTs, Ops);
5816 }
5817 
5818 MachineSDNode *
5819 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5820                              ArrayRef<EVT> ResultTys,
5821                              ArrayRef<SDValue> Ops) {
5822   SDVTList VTs = getVTList(ResultTys);
5823   return getMachineNode(Opcode, dl, VTs, Ops);
5824 }
5825 
5826 MachineSDNode *
5827 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5828                              ArrayRef<SDValue> OpsArray) {
5829   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5830   MachineSDNode *N;
5831   void *IP = nullptr;
5832   const SDValue *Ops = OpsArray.data();
5833   unsigned NumOps = OpsArray.size();
5834 
5835   if (DoCSE) {
5836     FoldingSetNodeID ID;
5837     AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
5838     IP = nullptr;
5839     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5840       return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5841     }
5842   }
5843 
5844   // Allocate a new MachineSDNode.
5845   N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5846                                         DL.getDebugLoc(), VTs);
5847 
5848   // Initialize the operands list.
5849   if (NumOps > array_lengthof(N->LocalOperands))
5850     // We're creating a final node that will live unmorphed for the
5851     // remainder of the current SelectionDAG iteration, so we can allocate
5852     // the operands directly out of a pool with no recycling metadata.
5853     N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5854                     Ops, NumOps);
5855   else
5856     N->InitOperands(N->LocalOperands, Ops, NumOps);
5857   N->OperandsNeedDelete = false;
5858 
5859   if (DoCSE)
5860     CSEMap.InsertNode(N, IP);
5861 
5862   InsertNode(N);
5863   return N;
5864 }
5865 
5866 /// getTargetExtractSubreg - A convenience function for creating
5867 /// TargetOpcode::EXTRACT_SUBREG nodes.
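//
// Usage sketch (placeholder subregister index, not part of this file):
//
//   SDValue Lo = DAG.getTargetExtractSubreg(SomeSubRegIdx, DL, MVT::i32,
//                                           Val64);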
5868 SDValue
5869 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5870                                      SDValue Operand) {
5871   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5872   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5873                                   VT, Operand, SRIdxVal);
5874   return SDValue(Subreg, 0);
5875 }
5876 
5877 /// getTargetInsertSubreg - A convenience function for creating
5878 /// TargetOpcode::INSERT_SUBREG nodes.
5879 SDValue
5880 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5881                                     SDValue Operand, SDValue Subreg) {
5882   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5883   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5884                                   VT, Operand, Subreg, SRIdxVal);
5885   return SDValue(Result, 0);
5886 }
5887 
5888 /// getNodeIfExists - Get the specified node if it's already available, or
5889 /// else return NULL.
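//
// Query sketch (placeholder names, not part of this file):
//
//   SDValue Ops[] = { LHS, RHS };
//   if (SDNode *Existing = DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(VT),
//                                              Ops, /*nuw=*/false,
//                                              /*nsw=*/false,
//                                              /*exact=*/false)) {
//     // Reuse Existing instead of building a duplicate node.
//   }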
5890 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5891                                       ArrayRef<SDValue> Ops, bool nuw, bool nsw,
5892                                       bool exact) {
5893   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
5894     FoldingSetNodeID ID;
5895     AddNodeIDNode(ID, Opcode, VTList, Ops);
5896     if (isBinOpWithFlags(Opcode))
5897       AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
5898     void *IP = nullptr;
5899     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5900       return E;
5901   }
5902   return nullptr;
5903 }
5904 
5905 /// getDbgValue - Creates an SDDbgValue node.
5906 ///
5907 /// SDNode
5908 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
5909                                       unsigned R, bool IsIndirect, uint64_t Off,
5910                                       DebugLoc DL, unsigned O) {
5911   assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
5912          "Expected inlined-at fields to agree");
5913   return new (Allocator) SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
5914 }
5915 
5916 /// Constant
5917 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
5918                                               const Value *C, uint64_t Off,
5919                                               DebugLoc DL, unsigned O) {
5920   assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
5921          "Expected inlined-at fields to agree");
5922   return new (Allocator) SDDbgValue(Var, Expr, C, Off, DL, O);
5923 }
5924 
5925 /// FrameIndex
5926 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
5927                                                 unsigned FI, uint64_t Off,
5928                                                 DebugLoc DL, unsigned O) {
5929   assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
5930          "Expected inlined-at fields to agree");
5931   return new (Allocator) SDDbgValue(Var, Expr, FI, Off, DL, O);
5932 }
5933 
5934 namespace {
5935 
5936 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5937 /// pointed to by a use iterator is deleted, increment the use iterator
5938 /// so that it doesn't dangle.
5939 ///
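//
// Concretely: if the node *UI currently points at is CSE-merged away while
// uses are being rewritten, NodeDeleted advances UI past it so the
// replacement loops below never dereference a deleted node.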
5940 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5941   SDNode::use_iterator &UI;
5942   SDNode::use_iterator &UE;
5943 
5944   void NodeDeleted(SDNode *N, SDNode *E) override {
5945     // Increment the iterator as needed.
5946     while (UI != UE && N == *UI)
5947       ++UI;
5948   }
5949 
5950 public:
5951   RAUWUpdateListener(SelectionDAG &d,
5952                      SDNode::use_iterator &ui,
5953                      SDNode::use_iterator &ue)
5954     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5955 };
5956 
5957 }
5958 
5959 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5960 /// This can cause recursive merging of nodes in the DAG.
5961 ///
5962 /// This version assumes From has a single result value.
5963 ///
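//
// Illustrative use (placeholder names, not part of this file): a combine
// that folds (add X, 0) down to X would call
//
//   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), X);
//
// after which every former user of AddNode's single result refers to X.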
5964 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5965   SDNode *From = FromN.getNode();
5966   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5967          "Cannot replace with this method!");
5968   assert(From != To.getNode() && "Cannot replace uses of with self");
5969 
5970   // Iterate over all the existing uses of From. New uses will be added
5971   // to the beginning of the use list, which we avoid visiting.
5972   // This specifically avoids visiting uses of From that arise while the
5973   // replacement is happening, because any such uses would be the result
5974   // of CSE: If an existing node looks like From after one of its operands
5975   // is replaced by To, we don't want to replace all of its users with To
5976   // as well. See PR3018 for more info.
5977   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5978   RAUWUpdateListener Listener(*this, UI, UE);
5979   while (UI != UE) {
5980     SDNode *User = *UI;
5981 
5982     // This node is about to morph, remove its old self from the CSE maps.
5983     RemoveNodeFromCSEMaps(User);
5984 
5985     // A user can appear in a use list multiple times, and when this
5986     // happens the uses are usually next to each other in the list.
5987     // To help reduce the number of CSE recomputations, process all
5988     // the uses of this user that we can find this way.
5989     do {
5990       SDUse &Use = UI.getUse();
5991       ++UI;
5992       Use.set(To);
5993     } while (UI != UE && *UI == User);
5994 
5995     // Now that we have modified User, add it back to the CSE maps.  If it
5996     // already exists there, recursively merge the results together.
5997     AddModifiedNodeToCSEMaps(User);
5998   }
5999 
6000   // If we just RAUW'd the root, take note.
6001   if (FromN == getRoot())
6002     setRoot(To);
6003 }
6004 
6005 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6006 /// This can cause recursive merging of nodes in the DAG.
6007 ///
6008 /// This version assumes that for each value of From, there is a
6009 /// corresponding value in To in the same position with the same type.
6010 ///
6011 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
6012 #ifndef NDEBUG
6013   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6014     assert((!From->hasAnyUseOfValue(i) ||
6015             From->getValueType(i) == To->getValueType(i)) &&
6016            "Cannot use this version of ReplaceAllUsesWith!");
6017 #endif
6018 
6019   // Handle the trivial case.
6020   if (From == To)
6021     return;
6022 
6023   // Iterate over just the existing users of From. See the comments in
6024   // the ReplaceAllUsesWith above.
6025   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6026   RAUWUpdateListener Listener(*this, UI, UE);
6027   while (UI != UE) {
6028     SDNode *User = *UI;
6029 
6030     // This node is about to morph, remove its old self from the CSE maps.
6031     RemoveNodeFromCSEMaps(User);
6032 
6033     // A user can appear in a use list multiple times, and when this
6034     // happens the uses are usually next to each other in the list.
6035     // To help reduce the number of CSE recomputations, process all
6036     // the uses of this user that we can find this way.
6037     do {
6038       SDUse &Use = UI.getUse();
6039       ++UI;
6040       Use.setNode(To);
6041     } while (UI != UE && *UI == User);
6042 
6043     // Now that we have modified User, add it back to the CSE maps.  If it
6044     // already exists there, recursively merge the results together.
6045     AddModifiedNodeToCSEMaps(User);
6046   }
6047 
6048   // If we just RAUW'd the root, take note.
6049   if (From == getRoot().getNode())
6050     setRoot(SDValue(To, getRoot().getResNo()));
6051 }
6052 
6053 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6054 /// This can cause recursive merging of nodes in the DAG.
6055 ///
6056 /// This version can replace From with any result values.  To must match the
6057 /// number and types of values returned by From.
6058 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
6059   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
6060     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
6061 
6062   // Iterate over just the existing users of From. See the comments in
6063   // the ReplaceAllUsesWith above.
6064   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6065   RAUWUpdateListener Listener(*this, UI, UE);
6066   while (UI != UE) {
6067     SDNode *User = *UI;
6068 
6069     // This node is about to morph, remove its old self from the CSE maps.
6070     RemoveNodeFromCSEMaps(User);
6071 
6072     // A user can appear in a use list multiple times, and when this
6073     // happens the uses are usually next to each other in the list.
6074     // To help reduce the number of CSE recomputations, process all
6075     // the uses of this user that we can find this way.
6076     do {
6077       SDUse &Use = UI.getUse();
6078       const SDValue &ToOp = To[Use.getResNo()];
6079       ++UI;
6080       Use.set(ToOp);
6081     } while (UI != UE && *UI == User);
6082 
6083     // Now that we have modified User, add it back to the CSE maps.  If it
6084     // already exists there, recursively merge the results together.
6085     AddModifiedNodeToCSEMaps(User);
6086   }
6087 
6088   // If we just RAUW'd the root, take note.
6089   if (From == getRoot().getNode())
6090     setRoot(SDValue(To[getRoot().getResNo()]));
6091 }
6092 
6093 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
6094 /// uses of other values produced by From.getNode() alone.  The Deleted
6095 /// vector is handled the same way as for ReplaceAllUsesWith.
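//
// Sketch (placeholder names, not part of this file): for a node producing
// {value, chain}, only the value result is rewritten here:
//
//   DAG.ReplaceAllUsesOfValueWith(SDValue(LoadNode, 0), NewValue);
//   // Users of SDValue(LoadNode, 1), the chain, are left untouched.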
6096 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
6097   // Handle the really simple, really trivial case efficiently.
6098   if (From == To) return;
6099 
6100   // Handle the simple, trivial case efficiently.
6101   if (From.getNode()->getNumValues() == 1) {
6102     ReplaceAllUsesWith(From, To);
6103     return;
6104   }
6105 
6106   // Iterate over just the existing users of From. See the comments in
6107   // the ReplaceAllUsesWith above.
6108   SDNode::use_iterator UI = From.getNode()->use_begin(),
6109                        UE = From.getNode()->use_end();
6110   RAUWUpdateListener Listener(*this, UI, UE);
6111   while (UI != UE) {
6112     SDNode *User = *UI;
6113     bool UserRemovedFromCSEMaps = false;
6114 
6115     // A user can appear in a use list multiple times, and when this
6116     // happens the uses are usually next to each other in the list.
6117     // To help reduce the number of CSE recomputations, process all
6118     // the uses of this user that we can find this way.
6119     do {
6120       SDUse &Use = UI.getUse();
6121 
6122       // Skip uses of different values from the same node.
6123       if (Use.getResNo() != From.getResNo()) {
6124         ++UI;
6125         continue;
6126       }
6127 
6128       // If this node hasn't been modified yet, it's still in the CSE maps,
6129       // so remove its old self from the CSE maps.
6130       if (!UserRemovedFromCSEMaps) {
6131         RemoveNodeFromCSEMaps(User);
6132         UserRemovedFromCSEMaps = true;
6133       }
6134 
6135       ++UI;
6136       Use.set(To);
6137     } while (UI != UE && *UI == User);
6138 
6139     // We are iterating over all uses of the From node, so if a use
6140     // doesn't use the specific value, no changes are made.
6141     if (!UserRemovedFromCSEMaps)
6142       continue;
6143 
6144     // Now that we have modified User, add it back to the CSE maps.  If it
6145     // already exists there, recursively merge the results together.
6146     AddModifiedNodeToCSEMaps(User);
6147   }
6148 
6149   // If we just RAUW'd the root, take note.
6150   if (From == getRoot())
6151     setRoot(To);
6152 }
6153 
6154 namespace {
6155   /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
6156   /// to record information about a use.
6157   struct UseMemo {
6158     SDNode *User;
6159     unsigned Index;
6160     SDUse *Use;
6161   };
6162 
6163   /// operator< - Sort Memos by User.
6164   bool operator<(const UseMemo &L, const UseMemo &R) {
6165     return (intptr_t)L.User < (intptr_t)R.User;
6166   }
6167 }
6168 
6169 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6170 /// uses of other values produced by From.getNode() alone.  The same value
6171 /// may appear in both the From and To list.  The Deleted vector is
6172 /// handled the same way as for ReplaceAllUsesWith.
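//
// Sketch (placeholder names, not part of this file): replacing both results
// of a two-result node in one call:
//
//   SDValue From[] = { SDValue(N, 0), SDValue(N, 1) };
//   SDValue To[]   = { NewVal, NewChain };
//   DAG.ReplaceAllUsesOfValuesWith(From, To, 2);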
6173 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
6174                                               const SDValue *To,
6175                                               unsigned Num){
6176   // Handle the simple, trivial case efficiently.
6177   if (Num == 1)
6178     return ReplaceAllUsesOfValueWith(*From, *To);
6179 
6180   // Read up all the uses and make records of them. This helps
6181   // processing new uses that are introduced during the
6182   // replacement process.
6183   SmallVector<UseMemo, 4> Uses;
6184   for (unsigned i = 0; i != Num; ++i) {
6185     unsigned FromResNo = From[i].getResNo();
6186     SDNode *FromNode = From[i].getNode();
6187     for (SDNode::use_iterator UI = FromNode->use_begin(),
6188          E = FromNode->use_end(); UI != E; ++UI) {
6189       SDUse &Use = UI.getUse();
6190       if (Use.getResNo() == FromResNo) {
6191         UseMemo Memo = { *UI, i, &Use };
6192         Uses.push_back(Memo);
6193       }
6194     }
6195   }
6196 
6197   // Sort the uses, so that all the uses from a given User are together.
6198   std::sort(Uses.begin(), Uses.end());
6199 
6200   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6201        UseIndex != UseIndexEnd; ) {
6202     // We know that this user uses some value of From.  If it is the right
6203     // value, update it.
6204     SDNode *User = Uses[UseIndex].User;
6205 
6206     // This node is about to morph, remove its old self from the CSE maps.
6207     RemoveNodeFromCSEMaps(User);
6208 
6209     // The Uses array is sorted, so all the uses for a given User
6210     // are next to each other in the list.
6211     // To help reduce the number of CSE recomputations, process all
6212     // the uses of this user that we can find this way.
6213     do {
6214       unsigned i = Uses[UseIndex].Index;
6215       SDUse &Use = *Uses[UseIndex].Use;
6216       ++UseIndex;
6217 
6218       Use.set(To[i]);
6219     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6220 
6221     // Now that we have modified User, add it back to the CSE maps.  If it
6222     // already exists there, recursively merge the results together.
6223     AddModifiedNodeToCSEMaps(User);
6224   }
6225 }
6226 
6227 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6228 /// based on their topological order. It reorders the AllNodes list in place
6229 /// so that operands precede their users, and returns the number of nodes.
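//
// The loop below is essentially Kahn's topological sort on the AllNodes
// ilist: nodes with no operands are moved to the sorted prefix immediately,
// every other node temporarily stores its outstanding-operand count in its
// NodeId, and a node is appended once that count drops to zero.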
6230 unsigned SelectionDAG::AssignTopologicalOrder() {
6231 
6232   unsigned DAGSize = 0;
6233 
6234   // SortedPos tracks the progress of the algorithm. Nodes before it are
6235   // sorted, nodes after it are unsorted. When the algorithm completes
6236   // it is at the end of the list.
6237   allnodes_iterator SortedPos = allnodes_begin();
6238 
6239   // Visit all the nodes. Move nodes with no operands to the front of
6240   // the list immediately. Annotate nodes that do have operands with their
6241   // operand count. Before we do this, the Node Id fields of the nodes
6242   // may contain arbitrary values. After, the Node Id fields for nodes
6243   // before SortedPos will contain the topological sort index, and the
6244   // Node Id fields for nodes At SortedPos and after will contain the
6245   // count of outstanding operands.
6246   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6247     SDNode *N = I++;
6248     checkForCycles(N, this);
6249     unsigned Degree = N->getNumOperands();
6250     if (Degree == 0) {
6251       // A node with no operands is trivially sorted; move it into place now.
6252       N->setNodeId(DAGSize++);
6253       allnodes_iterator Q = N;
6254       if (Q != SortedPos)
6255         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6256       assert(SortedPos != AllNodes.end() && "Overran node list");
6257       ++SortedPos;
6258     } else {
6259       // Temporarily use the Node Id as scratch space for the degree count.
6260       N->setNodeId(Degree);
6261     }
6262   }
6263 
6264   // Visit all the nodes. As we iterate, move nodes into sorted order,
6265   // such that by the time the end is reached all nodes will be sorted.
6266   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
6267     SDNode *N = I;
6268     checkForCycles(N, this);
6269     // N is in sorted position, so all its uses have one less operand
6270     // that needs to be sorted.
6271     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6272          UI != UE; ++UI) {
6273       SDNode *P = *UI;
6274       unsigned Degree = P->getNodeId();
6275       assert(Degree != 0 && "Invalid node degree");
6276       --Degree;
6277       if (Degree == 0) {
6278         // All of P's operands are sorted, so P may be sorted now.
6279         P->setNodeId(DAGSize++);
6280         if (P != SortedPos)
6281           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6282         assert(SortedPos != AllNodes.end() && "Overran node list");
6283         ++SortedPos;
6284       } else {
6285         // Update P's outstanding operand count.
6286         P->setNodeId(Degree);
6287       }
6288     }
6289     if (I == SortedPos) {
6290 #ifndef NDEBUG
6291       SDNode *S = ++I;
6292       dbgs() << "Overran sorted position:\n";
6293       S->dumprFull(this); dbgs() << "\n";
6294       dbgs() << "Checking if this is due to cycles\n";
6295       checkForCycles(this, true);
6296 #endif
6297       llvm_unreachable(nullptr);
6298     }
6299   }
6300 
6301   assert(SortedPos == AllNodes.end() &&
6302          "Topological sort incomplete!");
6303   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6304          "First node in topological sort is not the entry token!");
6305   assert(AllNodes.front().getNodeId() == 0 &&
6306          "First node in topological sort has non-zero id!");
6307   assert(AllNodes.front().getNumOperands() == 0 &&
6308          "First node in topological sort has operands!");
6309   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6310          "Last node in topological sort has unexpected id!");
6311   assert(AllNodes.back().use_empty() &&
6312          "Last node in topological sort has users!");
6313   assert(DAGSize == allnodes_size() && "Node count mismatch!");
6314   return DAGSize;
6315 }
6316 
6317 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null, the value is
6318 /// produced by SD.
6319 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6320   if (SD) {
6321     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
6322     SD->setHasDebugValue(true);
6323   }
6324   DbgInfo->add(DB, SD, isParameter);
6325 }
6326 
6327 /// TransferDbgValues - Transfer SDDbgValues from one SDValue to another.
6328 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6329   if (From == To || !From.getNode()->getHasDebugValue())
6330     return;
6331   SDNode *FromNode = From.getNode();
6332   SDNode *ToNode = To.getNode();
6333   ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6334   SmallVector<SDDbgValue *, 2> ClonedDVs;
6335   for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6336        I != E; ++I) {
6337     SDDbgValue *Dbg = *I;
6338     if (Dbg->getKind() == SDDbgValue::SDNODE) {
6339       SDDbgValue *Clone =
6340           getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
6341                       To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
6342                       Dbg->getDebugLoc(), Dbg->getOrder());
6343       ClonedDVs.push_back(Clone);
6344     }
6345   }
6346   for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6347          E = ClonedDVs.end(); I != E; ++I)
6348     AddDbgValue(*I, ToNode, false);
6349 }
6350 
6351 //===----------------------------------------------------------------------===//
6352 //                              SDNode Class
6353 //===----------------------------------------------------------------------===//
6354 
6355 HandleSDNode::~HandleSDNode() {
6356   DropOperands();
6357 }
6358 
6359 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6360                                          DebugLoc DL, const GlobalValue *GA,
6361                                          EVT VT, int64_t o, unsigned char TF)
6362   : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6363   TheGlobal = GA;
6364 }
6365 
6366 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6367                                          SDValue X, unsigned SrcAS,
6368                                          unsigned DestAS)
6369  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6370    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6371 
6372 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6373                      EVT memvt, MachineMemOperand *mmo)
6374  : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6375   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6376                                       MMO->isNonTemporal(), MMO->isInvariant());
6377   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6378   assert(isNonTemporal() == MMO->isNonTemporal() &&
6379          "Non-temporal encoding error!");
6380   // We check here that the size of the memory operand fits within the size of
6381   // the MMO. This is because the MMO might indicate only a possible address
6382   // range instead of specifying the affected memory addresses precisely.
6383   assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
6384 }
6385 
6386 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6387                      ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
6388    : SDNode(Opc, Order, dl, VTs, Ops),
6389      MemoryVT(memvt), MMO(mmo) {
6390   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6391                                       MMO->isNonTemporal(), MMO->isInvariant());
6392   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6393   assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
6394 }
6395 
6396 /// Profile - Gather unique data for the node.
6397 ///
6398 void SDNode::Profile(FoldingSetNodeID &ID) const {
6399   AddNodeIDNode(ID, this);
6400 }
6401 
6402 namespace {
6403   struct EVTArray {
6404     std::vector<EVT> VTs;
6405 
6406     EVTArray() {
6407       VTs.reserve(MVT::LAST_VALUETYPE);
6408       for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6409         VTs.push_back(MVT((MVT::SimpleValueType)i));
6410     }
6411   };
6412 }
6413 
6414 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6415 static ManagedStatic<EVTArray> SimpleVTArray;
6416 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6417 
6418 /// getValueTypeList - Return a pointer to the specified value type.
6419 ///
6420 const EVT *SDNode::getValueTypeList(EVT VT) {
6421   if (VT.isExtended()) {
6422     sys::SmartScopedLock<true> Lock(*VTMutex);
6423     return &(*EVTs->insert(VT).first);
6424   } else {
6425     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6426            "Value type out of range!");
6427     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6428   }
6429 }
6430 
6431 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6432 /// indicated value.  This method ignores uses of other values defined by this
6433 /// operation.
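///
/// Usage sketch (illustrative): checking that result 0 of a node N has exactly
/// one user could be written as
///
///   if (N->hasNUsesOfValue(1, 0)) {
///     // sole use of N's first result
///   }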
6434 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6435   assert(Value < getNumValues() && "Bad value!");
6436 
6437   // TODO: Only iterate over uses of a given value of the node
6438   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6439     if (UI.getUse().getResNo() == Value) {
6440       if (NUses == 0)
6441         return false;
6442       --NUses;
6443     }
6444   }
6445 
6446   // Found exactly the right number of uses?
6447   return NUses == 0;
6448 }
6449 
6450 
6451 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
6452 /// value. This method ignores uses of other values defined by this operation.
6453 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6454   assert(Value < getNumValues() && "Bad value!");
6455 
6456   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6457     if (UI.getUse().getResNo() == Value)
6458       return true;
6459 
6460   return false;
6461 }
6462 
6463 
6464 /// isOnlyUserOf - Return true if this node is the only use of N.
6465 ///
6466 bool SDNode::isOnlyUserOf(SDNode *N) const {
6467   bool Seen = false;
6468   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6469     SDNode *User = *I;
6470     if (User == this)
6471       Seen = true;
6472     else
6473       return false;
6474   }
6475 
6476   return Seen;
6477 }
6478 
6479 /// isOperandOf - Return true if this value is an operand of N.
6480 ///
6481 bool SDValue::isOperandOf(SDNode *N) const {
6482   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6483     if (*this == N->getOperand(i))
6484       return true;
6485   return false;
6486 }
6487 
6488 bool SDNode::isOperandOf(SDNode *N) const {
6489   for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6490     if (this == N->OperandList[i].getNode())
6491       return true;
6492   return false;
6493 }
6494 
6495 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6496 /// be a chain) reaches the specified operand without crossing any
6497 /// side-effecting instructions on any chain path.  In practice, this looks
6498 /// through token factors and non-volatile loads.  In order to remain efficient,
6499 /// this only looks a couple of nodes in; it does not do an exhaustive search.
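///
/// Usage sketch (illustrative; Chain and OtherChain are assumed SDValues): a
/// combine that wants to bypass Chain can ask
///
///   if (Chain.reachesChainWithoutSideEffects(OtherChain))
///     ... // no side-effecting node sits between the two chains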
6500 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6501                                                unsigned Depth) const {
6502   if (*this == Dest) return true;
6503 
6504   // Don't search too deeply, we just want to be able to see through
6505   // TokenFactors, etc.
6506   if (Depth == 0) return false;
6507 
6508   // If this is a token factor, all inputs to the TF happen in parallel.  If any
6509   // of the operands of the TF does not reach dest, then we cannot do the xform.
6510   if (getOpcode() == ISD::TokenFactor) {
6511     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6512       if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6513         return false;
6514     return true;
6515   }
6516 
6517   // Loads don't have side effects, look through them.
6518   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6519     if (!Ld->isVolatile())
6520       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6521   }
6522   return false;
6523 }
6524 
6525 /// hasPredecessor - Return true if N is a predecessor of this node.
6526 /// N is either an operand of this node, or can be reached by recursively
6527 /// traversing up the operands.
6528 /// NOTE: This is an expensive method. Use it carefully.
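///
/// Usage sketch for the helper below (illustrative; Node and Candidates are
/// hypothetical): the Visited set and Worklist may be shared across several
/// queries against the same node so earlier traversal work is reused.
///
///   SmallPtrSet<const SDNode *, 32> Visited;
///   SmallVector<const SDNode *, 16> Worklist;
///   for (const SDNode *C : Candidates)
///     if (Node->hasPredecessorHelper(C, Visited, Worklist))
///       return true;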
6529 bool SDNode::hasPredecessor(const SDNode *N) const {
6530   SmallPtrSet<const SDNode *, 32> Visited;
6531   SmallVector<const SDNode *, 16> Worklist;
6532   return hasPredecessorHelper(N, Visited, Worklist);
6533 }
6534 
6535 bool
6536 SDNode::hasPredecessorHelper(const SDNode *N,
6537                              SmallPtrSetImpl<const SDNode *> &Visited,
6538                              SmallVectorImpl<const SDNode *> &Worklist) const {
6539   if (Visited.empty()) {
6540     Worklist.push_back(this);
6541   } else {
6542     // Take a look in the visited set. If we've already encountered this node
6543     // we needn't search further.
6544     if (Visited.count(N))
6545       return true;
6546   }
6547 
6548   // Haven't visited N yet. Continue the search.
6549   while (!Worklist.empty()) {
6550     const SDNode *M = Worklist.pop_back_val();
6551     for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6552       SDNode *Op = M->getOperand(i).getNode();
6553       if (Visited.insert(Op).second)
6554         Worklist.push_back(Op);
6555       if (Op == N)
6556         return true;
6557     }
6558   }
6559 
6560   return false;
6561 }
6562 
6563 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6564   assert(Num < NumOperands && "Invalid child # of SDNode!");
6565   return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6566 }
6567 
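/// UnrollVectorOp - Scalarize the single-result vector operation N: extract
/// each element of its vector operands, apply the scalar form of the opcode,
/// and rebuild the result with BUILD_VECTOR. The result has ResNE elements,
/// padded with UNDEF (ResNE == 0 means the source element count).
///
/// Call sketch (illustrative; assumes a SelectionDAG &DAG and a suitable N):
///
///   SDValue Full  = DAG.UnrollVectorOp(N);    // unroll every lane
///   SDValue Eight = DAG.UnrollVectorOp(N, 8); // force an 8-element result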
6568 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6569   assert(N->getNumValues() == 1 &&
6570          "Can't unroll a vector with multiple results!");
6571 
6572   EVT VT = N->getValueType(0);
6573   unsigned NE = VT.getVectorNumElements();
6574   EVT EltVT = VT.getVectorElementType();
6575   SDLoc dl(N);
6576 
6577   SmallVector<SDValue, 8> Scalars;
6578   SmallVector<SDValue, 4> Operands(N->getNumOperands());
6579 
6580   // If ResNE is 0, fully unroll the vector op.
6581   if (ResNE == 0)
6582     ResNE = NE;
6583   else if (NE > ResNE)
6584     NE = ResNE;
6585 
6586   unsigned i;
6587   for (i= 0; i != NE; ++i) {
6588     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6589       SDValue Operand = N->getOperand(j);
6590       EVT OperandVT = Operand.getValueType();
6591       if (OperandVT.isVector()) {
6592         // A vector operand; extract a single element.
6593         EVT OperandEltVT = OperandVT.getVectorElementType();
6594         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6595                               OperandEltVT,
6596                               Operand,
6597                               getConstant(i, TLI->getVectorIdxTy()));
6598       } else {
6599         // A scalar operand; just use it as is.
6600         Operands[j] = Operand;
6601       }
6602     }
6603 
6604     switch (N->getOpcode()) {
6605     default:
6606       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
6607       break;
6608     case ISD::VSELECT:
6609       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
6610       break;
6611     case ISD::SHL:
6612     case ISD::SRA:
6613     case ISD::SRL:
6614     case ISD::ROTL:
6615     case ISD::ROTR:
6616       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6617                                getShiftAmountOperand(Operands[0].getValueType(),
6618                                                      Operands[1])));
6619       break;
6620     case ISD::SIGN_EXTEND_INREG:
6621     case ISD::FP_ROUND_INREG: {
6622       EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6623       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6624                                 Operands[0],
6625                                 getValueType(ExtVT)));
6626     }
6627     }
6628   }
6629 
6630   for (; i < ResNE; ++i)
6631     Scalars.push_back(getUNDEF(EltVT));
6632 
6633   return getNode(ISD::BUILD_VECTOR, dl,
6634                  EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
6635 }
6636 
6637 
6638 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6639 /// location that is 'Dist' units away from the location that the 'Base' load
6640 /// is loading from.
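///
/// For example (illustrative): if Base loads 4 bytes from address Ptr and LD
/// loads 4 bytes from Ptr + 4 on the same chain, then
/// isConsecutiveLoad(LD, Base, 4, 1) returns true, because LD's address is
/// exactly Dist * Bytes == 4 bytes beyond Base's.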
6641 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6642                                      unsigned Bytes, int Dist) const {
6643   if (LD->getChain() != Base->getChain())
6644     return false;
6645   EVT VT = LD->getValueType(0);
6646   if (VT.getSizeInBits() / 8 != Bytes)
6647     return false;
6648 
6649   SDValue Loc = LD->getOperand(1);
6650   SDValue BaseLoc = Base->getOperand(1);
6651   if (Loc.getOpcode() == ISD::FrameIndex) {
6652     if (BaseLoc.getOpcode() != ISD::FrameIndex)
6653       return false;
6654     const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6655     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
6656     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6657     int FS  = MFI->getObjectSize(FI);
6658     int BFS = MFI->getObjectSize(BFI);
6659     if (FS != BFS || FS != (int)Bytes) return false;
6660     return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6661   }
6662 
6663   // Handle X + C.
6664   if (isBaseWithConstantOffset(Loc)) {
6665     int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
6666     if (Loc.getOperand(0) == BaseLoc) {
6667       // If the base location is a simple address with no offset itself, then
6668       // the second load's first add operand should be the base address.
6669       if (LocOffset == Dist * (int)Bytes)
6670         return true;
6671     } else if (isBaseWithConstantOffset(BaseLoc)) {
6672       // The base location itself has an offset, so subtract that value from the
6673       // second load's offset before comparing to distance * size.
6674       int64_t BOffset =
6675         cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
6676       if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
6677         if ((LocOffset - BOffset) == Dist * (int)Bytes)
6678           return true;
6679       }
6680     }
6681   }
6682   const GlobalValue *GV1 = nullptr;
6683   const GlobalValue *GV2 = nullptr;
6684   int64_t Offset1 = 0;
6685   int64_t Offset2 = 0;
6686   bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6687   bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6688   if (isGA1 && isGA2 && GV1 == GV2)
6689     return Offset1 == (Offset2 + Dist*Bytes);
6690   return false;
6691 }
6692 
6693 
6694 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6695 /// it cannot be inferred.
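///
/// Worked example (illustrative): for a GlobalAddress G + 12 where G is known
/// to be 16-byte aligned, the known-zero low bits give Align == 16, and the
/// result is MinAlign(16, 12) == 4.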
6696 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6697   // If this is a GlobalAddress + cst, return the alignment.
6698   const GlobalValue *GV;
6699   int64_t GVOffset = 0;
6700   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6701     unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
6702     APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6703     llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
6704                            *TLI->getDataLayout());
6705     unsigned AlignBits = KnownZero.countTrailingOnes();
6706     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6707     if (Align)
6708       return MinAlign(Align, GVOffset);
6709   }
6710 
6711   // If this is a direct reference to a stack slot, use information about the
6712   // stack slot's alignment.
6713   int FrameIdx = 1 << 31;
6714   int64_t FrameOffset = 0;
6715   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6716     FrameIdx = FI->getIndex();
6717   } else if (isBaseWithConstantOffset(Ptr) &&
6718              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6719     // Handle FI+Cst
6720     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6721     FrameOffset = Ptr.getConstantOperandVal(1);
6722   }
6723 
6724   if (FrameIdx != (1 << 31)) {
6725     const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6726     unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6727                                     FrameOffset);
6728     return FIInfoAlign;
6729   }
6730 
6731   return 0;
6732 }
6733 
6734 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
6735 /// which is split (or expanded) into two not necessarily identical pieces.
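///
/// For example, v8i32 splits into (v4i32, v4i32); a non-vector type such as
/// i64 on a 32-bit target yields (i32, i32), the type it is transformed to.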
6736 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
6737   // Currently all types are split in half.
6738   EVT LoVT, HiVT;
6739   if (!VT.isVector()) {
6740     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
6741   } else {
6742     unsigned NumElements = VT.getVectorNumElements();
6743     assert(!(NumElements & 1) && "Splitting vector, but not in half!");
6744     LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
6745                                    NumElements/2);
6746   }
6747   return std::make_pair(LoVT, HiVT);
6748 }
6749 
6750 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
6751 /// low/high part.
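///
/// Usage sketch (illustrative; assumes a SelectionDAG &DAG and a vector value
/// Vec):
///
///   EVT LoVT, HiVT;
///   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Vec.getValueType());
///   SDValue Lo, Hi;
///   std::tie(Lo, Hi) = DAG.SplitVector(Vec, SDLoc(Vec), LoVT, HiVT);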
6752 std::pair<SDValue, SDValue>
6753 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
6754                           const EVT &HiVT) {
6755   assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
6756          N.getValueType().getVectorNumElements() &&
6757          "More vector elements requested than available!");
6758   SDValue Lo, Hi;
6759   Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
6760                getConstant(0, TLI->getVectorIdxTy()));
6761   Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
6762                getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
6763   return std::make_pair(Lo, Hi);
6764 }
6765 
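/// ExtractVectorElements - Append one EXTRACT_VECTOR_ELT of Op per requested
/// element to Args, starting at element Start. A Count of 0 means the full
/// element count of Op's vector type.
///
/// Sketch (illustrative; assumes a SelectionDAG &DAG and a v4f32 value Vec):
///
///   SmallVector<SDValue, 4> Elts;
///   DAG.ExtractVectorElements(Vec, Elts, 0, 0); // extracts all four f32 lanes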
6766 void SelectionDAG::ExtractVectorElements(SDValue Op,
6767                                          SmallVectorImpl<SDValue> &Args,
6768                                          unsigned Start, unsigned Count) {
6769   EVT VT = Op.getValueType();
6770   if (Count == 0)
6771     Count = VT.getVectorNumElements();
6772 
6773   EVT EltVT = VT.getVectorElementType();
6774   EVT IdxTy = TLI->getVectorIdxTy();
6775   SDLoc SL(Op);
6776   for (unsigned i = Start, e = Start + Count; i != e; ++i) {
6777     Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6778                            Op, getConstant(i, IdxTy)));
6779   }
6780 }
6781 
6782 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6783 unsigned GlobalAddressSDNode::getAddressSpace() const {
6784   return getGlobal()->getType()->getAddressSpace();
6785 }
6786 
6787 
6788 Type *ConstantPoolSDNode::getType() const {
6789   if (isMachineConstantPoolEntry())
6790     return Val.MachineCPVal->getType();
6791   return Val.ConstVal->getType();
6792 }
6793 
6794 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6795                                         APInt &SplatUndef,
6796                                         unsigned &SplatBitSize,
6797                                         bool &HasAnyUndefs,
6798                                         unsigned MinSplatBits,
6799                                         bool isBigEndian) const {
6800   EVT VT = getValueType(0);
6801   assert(VT.isVector() && "Expected a vector type");
6802   unsigned sz = VT.getSizeInBits();
6803   if (MinSplatBits > sz)
6804     return false;
6805 
6806   SplatValue = APInt(sz, 0);
6807   SplatUndef = APInt(sz, 0);
6808 
6809   // Get the bits.  Bits with undefined values (when the corresponding element
6810   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6811   // in SplatValue.  If any of the values are not constant, give up and return
6812   // false.
6813   unsigned int nOps = getNumOperands();
6814   assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6815   unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6816 
6817   for (unsigned j = 0; j < nOps; ++j) {
6818     unsigned i = isBigEndian ? nOps-1-j : j;
6819     SDValue OpVal = getOperand(i);
6820     unsigned BitPos = j * EltBitSize;
6821 
6822     if (OpVal.getOpcode() == ISD::UNDEF)
6823       SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6824     else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6825       SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6826                     zextOrTrunc(sz) << BitPos;
6827     else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6828       SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6829     else
6830       return false;
6831   }
6832 
6833   // The build_vector is all constants or undefs.  Find the smallest element
6834   // size that splats the vector.
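  // Worked example (illustrative): a <4 x i32> build_vector of four 0x01010101
  // constants starts with sz == 128; every halving still matches, so the loop
  // stops at sz == 8 and reports an 8-bit splat value of 0x01.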
6835 
6836   HasAnyUndefs = (SplatUndef != 0);
6837   while (sz > 8) {
6838 
6839     unsigned HalfSize = sz / 2;
6840     APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6841     APInt LowValue = SplatValue.trunc(HalfSize);
6842     APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6843     APInt LowUndef = SplatUndef.trunc(HalfSize);
6844 
6845     // If the two halves do not match (ignoring undef bits), stop here.
6846     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6847         MinSplatBits > HalfSize)
6848       break;
6849 
6850     SplatValue = HighValue | LowValue;
6851     SplatUndef = HighUndef & LowUndef;
6852 
6853     sz = HalfSize;
6854   }
6855 
6856   SplatBitSize = sz;
6857   return true;
6858 }
6859 
6860 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
6861   if (UndefElements) {
6862     UndefElements->clear();
6863     UndefElements->resize(getNumOperands());
6864   }
6865   SDValue Splatted;
6866   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6867     SDValue Op = getOperand(i);
6868     if (Op.getOpcode() == ISD::UNDEF) {
6869       if (UndefElements)
6870         (*UndefElements)[i] = true;
6871     } else if (!Splatted) {
6872       Splatted = Op;
6873     } else if (Splatted != Op) {
6874       return SDValue();
6875     }
6876   }
6877 
6878   if (!Splatted) {
6879     assert(getOperand(0).getOpcode() == ISD::UNDEF &&
6880            "Can only have a splat without a constant for all undefs.");
6881     return getOperand(0);
6882   }
6883 
6884   return Splatted;
6885 }
6886 
6887 ConstantSDNode *
6888 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
6889   return dyn_cast_or_null<ConstantSDNode>(
6890       getSplatValue(UndefElements).getNode());
6891 }
6892 
6893 ConstantFPSDNode *
6894 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
6895   return dyn_cast_or_null<ConstantFPSDNode>(
6896       getSplatValue(UndefElements).getNode());
6897 }
6898 
6899 bool BuildVectorSDNode::isConstant() const {
6900   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6901     unsigned Opc = getOperand(i).getOpcode();
6902     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
6903       return false;
6904   }
6905   return true;
6906 }
6907 
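/// isSplatMask - Return true if every non-undef element of the shuffle mask
/// refers to the same source element.
///
/// For example (illustrative), the mask {2, 2, -1, 2} is a splat of element 2,
/// while {0, 1, 0, 1} is not.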
6908 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6909   // Find the first non-undef value in the shuffle mask.
6910   unsigned i, e;
6911   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6912     /* search */;
6913 
6914   assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6915 
6916   // Make sure all remaining elements are either undef or the same as the first
6917   // non-undef value.
6918   for (int Idx = Mask[i]; i != e; ++i)
6919     if (Mask[i] >= 0 && Mask[i] != Idx)
6920       return false;
6921   return true;
6922 }
6923 
6924 #ifndef NDEBUG
6925 static void checkForCyclesHelper(const SDNode *N,
6926                                  SmallPtrSetImpl<const SDNode*> &Visited,
6927                                  SmallPtrSetImpl<const SDNode*> &Checked,
6928                                  const llvm::SelectionDAG *DAG) {
6929   // If this node has already been checked, don't check it again.
6930   if (Checked.count(N))
6931     return;
6932 
6933   // If a node has already been visited on this depth-first walk, reject it as
6934   // a cycle.
6935   if (!Visited.insert(N).second) {
6936     errs() << "Detected cycle in SelectionDAG\n";
6937     dbgs() << "Offending node:\n";
6938     N->dumprFull(DAG); dbgs() << "\n";
6939     abort();
6940   }
6941 
6942   for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6943     checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked, DAG);
6944 
6945   Checked.insert(N);
6946   Visited.erase(N);
6947 }
6948 #endif
6949 
6950 void llvm::checkForCycles(const llvm::SDNode *N,
6951                           const llvm::SelectionDAG *DAG,
6952                           bool force) {
6953 #ifndef NDEBUG
6954   bool check = force;
6955 #ifdef XDEBUG
6956   check = true;
6957 #endif  // XDEBUG
6958   if (check) {
6959     assert(N && "Checking nonexistent SDNode");
6960     SmallPtrSet<const SDNode*, 32> visited;
6961     SmallPtrSet<const SDNode*, 32> checked;
6962     checkForCyclesHelper(N, visited, checked, DAG);
6963   }
6964 #endif  // !NDEBUG
6965 }
6966 
6967 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
6968   checkForCycles(DAG->getRoot().getNode(), DAG, force);
6969 }
6970