1 //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the SelectionDAG class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineModuleInfo.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/DebugInfo.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GlobalAlias.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/ManagedStatic.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/Mutex.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Target/TargetInstrInfo.h"
44 #include "llvm/Target/TargetIntrinsicInfo.h"
45 #include "llvm/Target/TargetLowering.h"
46 #include "llvm/Target/TargetMachine.h"
47 #include "llvm/Target/TargetOptions.h"
48 #include "llvm/Target/TargetRegisterInfo.h"
49 #include "llvm/Target/TargetSelectionDAGInfo.h"
50 #include "llvm/Target/TargetSubtargetInfo.h"
51 #include <algorithm>
52 #include <cmath>
53 #include <utility>
54 
55 using namespace llvm;
56 
57 /// makeVTList - Return an instance of the SDVTList struct initialized with the
58 /// specified members.
makeVTList(const EVT * VTs,unsigned NumVTs)59 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
60   SDVTList Res = {VTs, NumVTs};
61   return Res;
62 }
63 
64 // Default null implementations of the callbacks.
NodeDeleted(SDNode *,SDNode *)65 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
NodeUpdated(SDNode *)66 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
67 
68 //===----------------------------------------------------------------------===//
69 //                              ConstantFPSDNode Class
70 //===----------------------------------------------------------------------===//
71 
72 /// isExactlyValue - We don't rely on operator== working on double values, as
73 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
74 /// As such, this method can be used to do an exact bit-for-bit comparison of
75 /// two floating point values.
isExactlyValue(const APFloat & V) const76 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
77   return getValueAPF().bitwiseIsEqual(V);
78 }
79 
isValueValidForType(EVT VT,const APFloat & Val)80 bool ConstantFPSDNode::isValueValidForType(EVT VT,
81                                            const APFloat& Val) {
82   assert(VT.isFloatingPoint() && "Can only convert between FP types");
83 
84   // convert modifies in place, so make a copy.
85   APFloat Val2 = APFloat(Val);
86   bool losesInfo;
87   (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
88                       APFloat::rmNearestTiesToEven,
89                       &losesInfo);
90   return !losesInfo;
91 }
92 
93 //===----------------------------------------------------------------------===//
94 //                              ISD Namespace
95 //===----------------------------------------------------------------------===//
96 
97 /// isBuildVectorAllOnes - Return true if the specified node is a
98 /// BUILD_VECTOR where all of the elements are ~0 or undef.
isBuildVectorAllOnes(const SDNode * N)99 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
100   // Look through a bit convert.
101   while (N->getOpcode() == ISD::BITCAST)
102     N = N->getOperand(0).getNode();
103 
104   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
105 
106   unsigned i = 0, e = N->getNumOperands();
107 
108   // Skip over all of the undef values.
109   while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
110     ++i;
111 
112   // Do not accept an all-undef vector.
113   if (i == e) return false;
114 
115   // Do not accept build_vectors that aren't all constants or which have non-~0
116   // elements. We have to be a bit careful here, as the type of the constant
117   // may not be the same as the type of the vector elements due to type
118   // legalization (the elements are promoted to a legal type for the target and
119   // a vector of a type may be legal when the base element type is not).
120   // We only want to check enough bits to cover the vector elements, because
121   // we care if the resultant vector is all ones, not whether the individual
122   // constants are.
123   SDValue NotZero = N->getOperand(i);
124   unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
125   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
126     if (CN->getAPIntValue().countTrailingOnes() < EltSize)
127       return false;
128   } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
129     if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
130       return false;
131   } else
132     return false;
133 
134   // Okay, we have at least one ~0 value, check to see if the rest match or are
135   // undefs. Even with the above element type twiddling, this should be OK, as
136   // the same type legalization should have applied to all the elements.
137   for (++i; i != e; ++i)
138     if (N->getOperand(i) != NotZero &&
139         N->getOperand(i).getOpcode() != ISD::UNDEF)
140       return false;
141   return true;
142 }
143 
144 
145 /// isBuildVectorAllZeros - Return true if the specified node is a
146 /// BUILD_VECTOR where all of the elements are 0 or undef.
isBuildVectorAllZeros(const SDNode * N)147 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
148   // Look through a bit convert.
149   while (N->getOpcode() == ISD::BITCAST)
150     N = N->getOperand(0).getNode();
151 
152   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
153 
154   bool IsAllUndef = true;
155   for (const SDValue &Op : N->op_values()) {
156     if (Op.getOpcode() == ISD::UNDEF)
157       continue;
158     IsAllUndef = false;
159     // Do not accept build_vectors that aren't all constants or which have non-0
160     // elements. We have to be a bit careful here, as the type of the constant
161     // may not be the same as the type of the vector elements due to type
162     // legalization (the elements are promoted to a legal type for the target
163     // and a vector of a type may be legal when the base element type is not).
164     // We only want to check enough bits to cover the vector elements, because
165     // we care if the resultant vector is all zeros, not whether the individual
166     // constants are.
167     unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
168     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
169       if (CN->getAPIntValue().countTrailingZeros() < EltSize)
170         return false;
171     } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
172       if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
173         return false;
174     } else
175       return false;
176   }
177 
178   // Do not accept an all-undef vector.
179   if (IsAllUndef)
180     return false;
181   return true;
182 }
183 
184 /// \brief Return true if the specified node is a BUILD_VECTOR node of
185 /// all ConstantSDNode or undef.
isBuildVectorOfConstantSDNodes(const SDNode * N)186 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
187   if (N->getOpcode() != ISD::BUILD_VECTOR)
188     return false;
189 
190   for (const SDValue &Op : N->op_values()) {
191     if (Op.getOpcode() == ISD::UNDEF)
192       continue;
193     if (!isa<ConstantSDNode>(Op))
194       return false;
195   }
196   return true;
197 }
198 
199 /// \brief Return true if the specified node is a BUILD_VECTOR node of
200 /// all ConstantFPSDNode or undef.
isBuildVectorOfConstantFPSDNodes(const SDNode * N)201 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
202   if (N->getOpcode() != ISD::BUILD_VECTOR)
203     return false;
204 
205   for (const SDValue &Op : N->op_values()) {
206     if (Op.getOpcode() == ISD::UNDEF)
207       continue;
208     if (!isa<ConstantFPSDNode>(Op))
209       return false;
210   }
211   return true;
212 }
213 
214 /// allOperandsUndef - Return true if the node has at least one operand
215 /// and all operands of the specified node are ISD::UNDEF.
allOperandsUndef(const SDNode * N)216 bool ISD::allOperandsUndef(const SDNode *N) {
217   // Return false if the node has no operands.
218   // This is "logically inconsistent" with the definition of "all" but
219   // is probably the desired behavior.
220   if (N->getNumOperands() == 0)
221     return false;
222 
223   for (const SDValue &Op : N->op_values())
224     if (Op.getOpcode() != ISD::UNDEF)
225       return false;
226 
227   return true;
228 }
229 
getExtForLoadExtType(bool IsFP,ISD::LoadExtType ExtType)230 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
231   switch (ExtType) {
232   case ISD::EXTLOAD:
233     return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
234   case ISD::SEXTLOAD:
235     return ISD::SIGN_EXTEND;
236   case ISD::ZEXTLOAD:
237     return ISD::ZERO_EXTEND;
238   default:
239     break;
240   }
241 
242   llvm_unreachable("Invalid LoadExtType");
243 }
244 
245 /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
246 /// when given the operation for (X op Y).
getSetCCSwappedOperands(ISD::CondCode Operation)247 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
248   // To perform this operation, we just need to swap the L and G bits of the
249   // operation.
250   unsigned OldL = (Operation >> 2) & 1;
251   unsigned OldG = (Operation >> 1) & 1;
252   return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
253                        (OldL << 1) |       // New G bit
254                        (OldG << 2));       // New L bit.
255 }
256 
257 /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
258 /// 'op' is a valid SetCC operation.
getSetCCInverse(ISD::CondCode Op,bool isInteger)259 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
260   unsigned Operation = Op;
261   if (isInteger)
262     Operation ^= 7;   // Flip L, G, E bits, but not U.
263   else
264     Operation ^= 15;  // Flip all of the condition bits.
265 
266   if (Operation > ISD::SETTRUE2)
267     Operation &= ~8;  // Don't let N and U bits get set.
268 
269   return ISD::CondCode(Operation);
270 }
271 
272 
273 /// isSignedOp - For an integer comparison, return 1 if the comparison is a
274 /// signed operation and 2 if the result is an unsigned comparison.  Return zero
275 /// if the operation does not depend on the sign of the input (setne and seteq).
isSignedOp(ISD::CondCode Opcode)276 static int isSignedOp(ISD::CondCode Opcode) {
277   switch (Opcode) {
278   default: llvm_unreachable("Illegal integer setcc operation!");
279   case ISD::SETEQ:
280   case ISD::SETNE: return 0;
281   case ISD::SETLT:
282   case ISD::SETLE:
283   case ISD::SETGT:
284   case ISD::SETGE: return 1;
285   case ISD::SETULT:
286   case ISD::SETULE:
287   case ISD::SETUGT:
288   case ISD::SETUGE: return 2;
289   }
290 }
291 
292 /// getSetCCOrOperation - Return the result of a logical OR between different
293 /// comparisons of identical values: ((X op1 Y) | (X op2 Y)).  This function
294 /// returns SETCC_INVALID if it is not possible to represent the resultant
295 /// comparison.
getSetCCOrOperation(ISD::CondCode Op1,ISD::CondCode Op2,bool isInteger)296 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
297                                        bool isInteger) {
298   if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
299     // Cannot fold a signed integer setcc with an unsigned integer setcc.
300     return ISD::SETCC_INVALID;
301 
302   unsigned Op = Op1 | Op2;  // Combine all of the condition bits.
303 
304   // If the N and U bits get set then the resultant comparison DOES suddenly
305   // care about orderedness, and is true when ordered.
306   if (Op > ISD::SETTRUE2)
307     Op &= ~16;     // Clear the U bit if the N bit is set.
308 
309   // Canonicalize illegal integer setcc's.
310   if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
311     Op = ISD::SETNE;
312 
313   return ISD::CondCode(Op);
314 }
315 
316 /// getSetCCAndOperation - Return the result of a logical AND between different
317 /// comparisons of identical values: ((X op1 Y) & (X op2 Y)).  This
318 /// function returns zero if it is not possible to represent the resultant
319 /// comparison.
getSetCCAndOperation(ISD::CondCode Op1,ISD::CondCode Op2,bool isInteger)320 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
321                                         bool isInteger) {
322   if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
323     // Cannot fold a signed setcc with an unsigned setcc.
324     return ISD::SETCC_INVALID;
325 
326   // Combine all of the condition bits.
327   ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
328 
329   // Canonicalize illegal integer setcc's.
330   if (isInteger) {
331     switch (Result) {
332     default: break;
333     case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
334     case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
335     case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
336     case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
337     case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
338     }
339   }
340 
341   return Result;
342 }
343 
344 //===----------------------------------------------------------------------===//
345 //                           SDNode Profile Support
346 //===----------------------------------------------------------------------===//
347 
348 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
349 ///
AddNodeIDOpcode(FoldingSetNodeID & ID,unsigned OpC)350 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
351   ID.AddInteger(OpC);
352 }
353 
354 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
355 /// solely with their pointer.
AddNodeIDValueTypes(FoldingSetNodeID & ID,SDVTList VTList)356 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
357   ID.AddPointer(VTList.VTs);
358 }
359 
360 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
361 ///
AddNodeIDOperands(FoldingSetNodeID & ID,ArrayRef<SDValue> Ops)362 static void AddNodeIDOperands(FoldingSetNodeID &ID,
363                               ArrayRef<SDValue> Ops) {
364   for (auto& Op : Ops) {
365     ID.AddPointer(Op.getNode());
366     ID.AddInteger(Op.getResNo());
367   }
368 }
369 
370 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
371 ///
AddNodeIDOperands(FoldingSetNodeID & ID,ArrayRef<SDUse> Ops)372 static void AddNodeIDOperands(FoldingSetNodeID &ID,
373                               ArrayRef<SDUse> Ops) {
374   for (auto& Op : Ops) {
375     ID.AddPointer(Op.getNode());
376     ID.AddInteger(Op.getResNo());
377   }
378 }
379 
380 /// Add logical or fast math flag values to FoldingSetNodeID value.
AddNodeIDFlags(FoldingSetNodeID & ID,unsigned Opcode,const SDNodeFlags * Flags)381 static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
382                            const SDNodeFlags *Flags) {
383   if (!isBinOpWithFlags(Opcode))
384     return;
385 
386   unsigned RawFlags = 0;
387   if (Flags)
388     RawFlags = Flags->getRawFlags();
389   ID.AddInteger(RawFlags);
390 }
391 
AddNodeIDFlags(FoldingSetNodeID & ID,const SDNode * N)392 static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
393   AddNodeIDFlags(ID, N->getOpcode(), N->getFlags());
394 }
395 
AddNodeIDNode(FoldingSetNodeID & ID,unsigned short OpC,SDVTList VTList,ArrayRef<SDValue> OpList)396 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
397                           SDVTList VTList, ArrayRef<SDValue> OpList) {
398   AddNodeIDOpcode(ID, OpC);
399   AddNodeIDValueTypes(ID, VTList);
400   AddNodeIDOperands(ID, OpList);
401 }
402 
403 /// If this is an SDNode with special info, add this info to the NodeID data.
AddNodeIDCustom(FoldingSetNodeID & ID,const SDNode * N)404 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
405   switch (N->getOpcode()) {
406   case ISD::TargetExternalSymbol:
407   case ISD::ExternalSymbol:
408   case ISD::MCSymbol:
409     llvm_unreachable("Should only be used on nodes with operands");
410   default: break;  // Normal nodes don't need extra info.
411   case ISD::TargetConstant:
412   case ISD::Constant: {
413     const ConstantSDNode *C = cast<ConstantSDNode>(N);
414     ID.AddPointer(C->getConstantIntValue());
415     ID.AddBoolean(C->isOpaque());
416     break;
417   }
418   case ISD::TargetConstantFP:
419   case ISD::ConstantFP: {
420     ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
421     break;
422   }
423   case ISD::TargetGlobalAddress:
424   case ISD::GlobalAddress:
425   case ISD::TargetGlobalTLSAddress:
426   case ISD::GlobalTLSAddress: {
427     const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
428     ID.AddPointer(GA->getGlobal());
429     ID.AddInteger(GA->getOffset());
430     ID.AddInteger(GA->getTargetFlags());
431     ID.AddInteger(GA->getAddressSpace());
432     break;
433   }
434   case ISD::BasicBlock:
435     ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
436     break;
437   case ISD::Register:
438     ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
439     break;
440   case ISD::RegisterMask:
441     ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
442     break;
443   case ISD::SRCVALUE:
444     ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
445     break;
446   case ISD::FrameIndex:
447   case ISD::TargetFrameIndex:
448     ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
449     break;
450   case ISD::JumpTable:
451   case ISD::TargetJumpTable:
452     ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
453     ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
454     break;
455   case ISD::ConstantPool:
456   case ISD::TargetConstantPool: {
457     const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
458     ID.AddInteger(CP->getAlignment());
459     ID.AddInteger(CP->getOffset());
460     if (CP->isMachineConstantPoolEntry())
461       CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
462     else
463       ID.AddPointer(CP->getConstVal());
464     ID.AddInteger(CP->getTargetFlags());
465     break;
466   }
467   case ISD::TargetIndex: {
468     const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
469     ID.AddInteger(TI->getIndex());
470     ID.AddInteger(TI->getOffset());
471     ID.AddInteger(TI->getTargetFlags());
472     break;
473   }
474   case ISD::LOAD: {
475     const LoadSDNode *LD = cast<LoadSDNode>(N);
476     ID.AddInteger(LD->getMemoryVT().getRawBits());
477     ID.AddInteger(LD->getRawSubclassData());
478     ID.AddInteger(LD->getPointerInfo().getAddrSpace());
479     break;
480   }
481   case ISD::STORE: {
482     const StoreSDNode *ST = cast<StoreSDNode>(N);
483     ID.AddInteger(ST->getMemoryVT().getRawBits());
484     ID.AddInteger(ST->getRawSubclassData());
485     ID.AddInteger(ST->getPointerInfo().getAddrSpace());
486     break;
487   }
488   case ISD::ATOMIC_CMP_SWAP:
489   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
490   case ISD::ATOMIC_SWAP:
491   case ISD::ATOMIC_LOAD_ADD:
492   case ISD::ATOMIC_LOAD_SUB:
493   case ISD::ATOMIC_LOAD_AND:
494   case ISD::ATOMIC_LOAD_OR:
495   case ISD::ATOMIC_LOAD_XOR:
496   case ISD::ATOMIC_LOAD_NAND:
497   case ISD::ATOMIC_LOAD_MIN:
498   case ISD::ATOMIC_LOAD_MAX:
499   case ISD::ATOMIC_LOAD_UMIN:
500   case ISD::ATOMIC_LOAD_UMAX:
501   case ISD::ATOMIC_LOAD:
502   case ISD::ATOMIC_STORE: {
503     const AtomicSDNode *AT = cast<AtomicSDNode>(N);
504     ID.AddInteger(AT->getMemoryVT().getRawBits());
505     ID.AddInteger(AT->getRawSubclassData());
506     ID.AddInteger(AT->getPointerInfo().getAddrSpace());
507     break;
508   }
509   case ISD::PREFETCH: {
510     const MemSDNode *PF = cast<MemSDNode>(N);
511     ID.AddInteger(PF->getPointerInfo().getAddrSpace());
512     break;
513   }
514   case ISD::VECTOR_SHUFFLE: {
515     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
516     for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
517          i != e; ++i)
518       ID.AddInteger(SVN->getMaskElt(i));
519     break;
520   }
521   case ISD::TargetBlockAddress:
522   case ISD::BlockAddress: {
523     const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
524     ID.AddPointer(BA->getBlockAddress());
525     ID.AddInteger(BA->getOffset());
526     ID.AddInteger(BA->getTargetFlags());
527     break;
528   }
529   } // end switch (N->getOpcode())
530 
531   AddNodeIDFlags(ID, N);
532 
533   // Target specific memory nodes could also have address spaces to check.
534   if (N->isTargetMemoryOpcode())
535     ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
536 }
537 
538 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
539 /// data.
AddNodeIDNode(FoldingSetNodeID & ID,const SDNode * N)540 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
541   AddNodeIDOpcode(ID, N->getOpcode());
542   // Add the return value info.
543   AddNodeIDValueTypes(ID, N->getVTList());
544   // Add the operand info.
545   AddNodeIDOperands(ID, N->ops());
546 
547   // Handle SDNode leafs with special info.
548   AddNodeIDCustom(ID, N);
549 }
550 
551 /// encodeMemSDNodeFlags - Generic routine for computing a value for use in
552 /// the CSE map that carries volatility, temporalness, indexing mode, and
553 /// extension/truncation information.
554 ///
555 static inline unsigned
encodeMemSDNodeFlags(int ConvType,ISD::MemIndexedMode AM,bool isVolatile,bool isNonTemporal,bool isInvariant)556 encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
557                      bool isNonTemporal, bool isInvariant) {
558   assert((ConvType & 3) == ConvType &&
559          "ConvType may not require more than 2 bits!");
560   assert((AM & 7) == AM &&
561          "AM may not require more than 3 bits!");
562   return ConvType |
563          (AM << 2) |
564          (isVolatile << 5) |
565          (isNonTemporal << 6) |
566          (isInvariant << 7);
567 }
568 
569 //===----------------------------------------------------------------------===//
570 //                              SelectionDAG Class
571 //===----------------------------------------------------------------------===//
572 
573 /// doNotCSE - Return true if CSE should not be performed for this node.
doNotCSE(SDNode * N)574 static bool doNotCSE(SDNode *N) {
575   if (N->getValueType(0) == MVT::Glue)
576     return true; // Never CSE anything that produces a flag.
577 
578   switch (N->getOpcode()) {
579   default: break;
580   case ISD::HANDLENODE:
581   case ISD::EH_LABEL:
582     return true;   // Never CSE these nodes.
583   }
584 
585   // Check that remaining values produced are not flags.
586   for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
587     if (N->getValueType(i) == MVT::Glue)
588       return true; // Never CSE anything that produces a flag.
589 
590   return false;
591 }
592 
593 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
594 /// SelectionDAG.
RemoveDeadNodes()595 void SelectionDAG::RemoveDeadNodes() {
596   // Create a dummy node (which is not added to allnodes), that adds a reference
597   // to the root node, preventing it from being deleted.
598   HandleSDNode Dummy(getRoot());
599 
600   SmallVector<SDNode*, 128> DeadNodes;
601 
602   // Add all obviously-dead nodes to the DeadNodes worklist.
603   for (SDNode &Node : allnodes())
604     if (Node.use_empty())
605       DeadNodes.push_back(&Node);
606 
607   RemoveDeadNodes(DeadNodes);
608 
609   // If the root changed (e.g. it was a dead load, update the root).
610   setRoot(Dummy.getValue());
611 }
612 
613 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
614 /// given list, and any nodes that become unreachable as a result.
RemoveDeadNodes(SmallVectorImpl<SDNode * > & DeadNodes)615 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
616 
617   // Process the worklist, deleting the nodes and adding their uses to the
618   // worklist.
619   while (!DeadNodes.empty()) {
620     SDNode *N = DeadNodes.pop_back_val();
621 
622     for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
623       DUL->NodeDeleted(N, nullptr);
624 
625     // Take the node out of the appropriate CSE map.
626     RemoveNodeFromCSEMaps(N);
627 
628     // Next, brutally remove the operand list.  This is safe to do, as there are
629     // no cycles in the graph.
630     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
631       SDUse &Use = *I++;
632       SDNode *Operand = Use.getNode();
633       Use.set(SDValue());
634 
635       // Now that we removed this operand, see if there are no uses of it left.
636       if (Operand->use_empty())
637         DeadNodes.push_back(Operand);
638     }
639 
640     DeallocateNode(N);
641   }
642 }
643 
RemoveDeadNode(SDNode * N)644 void SelectionDAG::RemoveDeadNode(SDNode *N){
645   SmallVector<SDNode*, 16> DeadNodes(1, N);
646 
647   // Create a dummy node that adds a reference to the root node, preventing
648   // it from being deleted.  (This matters if the root is an operand of the
649   // dead node.)
650   HandleSDNode Dummy(getRoot());
651 
652   RemoveDeadNodes(DeadNodes);
653 }
654 
DeleteNode(SDNode * N)655 void SelectionDAG::DeleteNode(SDNode *N) {
656   // First take this out of the appropriate CSE map.
657   RemoveNodeFromCSEMaps(N);
658 
659   // Finally, remove uses due to operands of this node, remove from the
660   // AllNodes list, and delete the node.
661   DeleteNodeNotInCSEMaps(N);
662 }
663 
DeleteNodeNotInCSEMaps(SDNode * N)664 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
665   assert(N != AllNodes.begin() && "Cannot delete the entry node!");
666   assert(N->use_empty() && "Cannot delete a node that is not dead!");
667 
668   // Drop all of the operands and decrement used node's use counts.
669   N->DropOperands();
670 
671   DeallocateNode(N);
672 }
673 
erase(const SDNode * Node)674 void SDDbgInfo::erase(const SDNode *Node) {
675   DbgValMapType::iterator I = DbgValMap.find(Node);
676   if (I == DbgValMap.end())
677     return;
678   for (auto &Val: I->second)
679     Val->setIsInvalidated();
680   DbgValMap.erase(I);
681 }
682 
DeallocateNode(SDNode * N)683 void SelectionDAG::DeallocateNode(SDNode *N) {
684   if (N->OperandsNeedDelete)
685     delete[] N->OperandList;
686 
687   // Set the opcode to DELETED_NODE to help catch bugs when node
688   // memory is reallocated.
689   N->NodeType = ISD::DELETED_NODE;
690 
691   NodeAllocator.Deallocate(AllNodes.remove(N));
692 
693   // If any of the SDDbgValue nodes refer to this SDNode, invalidate
694   // them and forget about that node.
695   DbgInfo->erase(N);
696 }
697 
698 #ifndef NDEBUG
699 /// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
VerifySDNode(SDNode * N)700 static void VerifySDNode(SDNode *N) {
701   switch (N->getOpcode()) {
702   default:
703     break;
704   case ISD::BUILD_PAIR: {
705     EVT VT = N->getValueType(0);
706     assert(N->getNumValues() == 1 && "Too many results!");
707     assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
708            "Wrong return type!");
709     assert(N->getNumOperands() == 2 && "Wrong number of operands!");
710     assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
711            "Mismatched operand types!");
712     assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
713            "Wrong operand type!");
714     assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
715            "Wrong return type size");
716     break;
717   }
718   case ISD::BUILD_VECTOR: {
719     assert(N->getNumValues() == 1 && "Too many results!");
720     assert(N->getValueType(0).isVector() && "Wrong return type!");
721     assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
722            "Wrong number of operands!");
723     EVT EltVT = N->getValueType(0).getVectorElementType();
724     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
725       assert((I->getValueType() == EltVT ||
726              (EltVT.isInteger() && I->getValueType().isInteger() &&
727               EltVT.bitsLE(I->getValueType()))) &&
728             "Wrong operand type!");
729       assert(I->getValueType() == N->getOperand(0).getValueType() &&
730              "Operands must all have the same type");
731     }
732     break;
733   }
734   }
735 }
736 #endif // NDEBUG
737 
738 /// \brief Insert a newly allocated node into the DAG.
739 ///
740 /// Handles insertion into the all nodes list and CSE map, as well as
741 /// verification and other common operations when a new node is allocated.
InsertNode(SDNode * N)742 void SelectionDAG::InsertNode(SDNode *N) {
743   AllNodes.push_back(N);
744 #ifndef NDEBUG
745   N->PersistentId = NextPersistentId++;
746   VerifySDNode(N);
747 #endif
748 }
749 
750 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
751 /// correspond to it.  This is useful when we're about to delete or repurpose
752 /// the node.  We don't want future request for structurally identical nodes
753 /// to return N anymore.
RemoveNodeFromCSEMaps(SDNode * N)754 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
755   bool Erased = false;
756   switch (N->getOpcode()) {
757   case ISD::HANDLENODE: return false;  // noop.
758   case ISD::CONDCODE:
759     assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
760            "Cond code doesn't exist!");
761     Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
762     CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
763     break;
764   case ISD::ExternalSymbol:
765     Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
766     break;
767   case ISD::TargetExternalSymbol: {
768     ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
769     Erased = TargetExternalSymbols.erase(
770                std::pair<std::string,unsigned char>(ESN->getSymbol(),
771                                                     ESN->getTargetFlags()));
772     break;
773   }
774   case ISD::MCSymbol: {
775     auto *MCSN = cast<MCSymbolSDNode>(N);
776     Erased = MCSymbols.erase(MCSN->getMCSymbol());
777     break;
778   }
779   case ISD::VALUETYPE: {
780     EVT VT = cast<VTSDNode>(N)->getVT();
781     if (VT.isExtended()) {
782       Erased = ExtendedValueTypeNodes.erase(VT);
783     } else {
784       Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
785       ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
786     }
787     break;
788   }
789   default:
790     // Remove it from the CSE Map.
791     assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
792     assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
793     Erased = CSEMap.RemoveNode(N);
794     break;
795   }
796 #ifndef NDEBUG
797   // Verify that the node was actually in one of the CSE maps, unless it has a
798   // flag result (which cannot be CSE'd) or is one of the special cases that are
799   // not subject to CSE.
800   if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
801       !N->isMachineOpcode() && !doNotCSE(N)) {
802     N->dump(this);
803     dbgs() << "\n";
804     llvm_unreachable("Node is not in map!");
805   }
806 #endif
807   return Erased;
808 }
809 
810 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
811 /// maps and modified in place. Add it back to the CSE maps, unless an identical
812 /// node already exists, in which case transfer all its users to the existing
813 /// node. This transfer can potentially trigger recursive merging.
814 ///
815 void
AddModifiedNodeToCSEMaps(SDNode * N)816 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
817   // For node types that aren't CSE'd, just act as if no identical node
818   // already exists.
819   if (!doNotCSE(N)) {
820     SDNode *Existing = CSEMap.GetOrInsertNode(N);
821     if (Existing != N) {
822       // If there was already an existing matching node, use ReplaceAllUsesWith
823       // to replace the dead one with the existing one.  This can cause
824       // recursive merging of other unrelated nodes down the line.
825       ReplaceAllUsesWith(N, Existing);
826 
827       // N is now dead. Inform the listeners and delete it.
828       for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
829         DUL->NodeDeleted(N, Existing);
830       DeleteNodeNotInCSEMaps(N);
831       return;
832     }
833   }
834 
835   // If the node doesn't already exist, we updated it.  Inform listeners.
836   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
837     DUL->NodeUpdated(N);
838 }
839 
840 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
841 /// were replaced with those specified.  If this node is never memoized,
842 /// return null, otherwise return a pointer to the slot it would take.  If a
843 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,SDValue Op,void * & InsertPos)844 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
845                                            void *&InsertPos) {
846   if (doNotCSE(N))
847     return nullptr;
848 
849   SDValue Ops[] = { Op };
850   FoldingSetNodeID ID;
851   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
852   AddNodeIDCustom(ID, N);
853   SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
854   return Node;
855 }
856 
857 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
858 /// were replaced with those specified.  If this node is never memoized,
859 /// return null, otherwise return a pointer to the slot it would take.  If a
860 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,SDValue Op1,SDValue Op2,void * & InsertPos)861 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
862                                            SDValue Op1, SDValue Op2,
863                                            void *&InsertPos) {
864   if (doNotCSE(N))
865     return nullptr;
866 
867   SDValue Ops[] = { Op1, Op2 };
868   FoldingSetNodeID ID;
869   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
870   AddNodeIDCustom(ID, N);
871   SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
872   return Node;
873 }
874 
875 
876 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
877 /// were replaced with those specified.  If this node is never memoized,
878 /// return null, otherwise return a pointer to the slot it would take.  If a
879 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,ArrayRef<SDValue> Ops,void * & InsertPos)880 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
881                                            void *&InsertPos) {
882   if (doNotCSE(N))
883     return nullptr;
884 
885   FoldingSetNodeID ID;
886   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
887   AddNodeIDCustom(ID, N);
888   SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
889   return Node;
890 }
891 
892 /// getEVTAlignment - Compute the default alignment value for the
893 /// given type.
894 ///
getEVTAlignment(EVT VT) const895 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
896   Type *Ty = VT == MVT::iPTR ?
897                    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
898                    VT.getTypeForEVT(*getContext());
899 
900   return getDataLayout().getABITypeAlignment(Ty);
901 }
902 
903 // EntryNode could meaningfully have debug info if we can find it...
SelectionDAG(const TargetMachine & tm,CodeGenOpt::Level OL)904 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
905     : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
906       EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
907       Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
908       UpdateListeners(nullptr) {
909   InsertNode(&EntryNode);
910   DbgInfo = new SDDbgInfo();
911 }
912 
init(MachineFunction & mf)913 void SelectionDAG::init(MachineFunction &mf) {
914   MF = &mf;
915   TLI = getSubtarget().getTargetLowering();
916   TSI = getSubtarget().getSelectionDAGInfo();
917   Context = &mf.getFunction()->getContext();
918 }
919 
~SelectionDAG()920 SelectionDAG::~SelectionDAG() {
921   assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
922   allnodes_clear();
923   delete DbgInfo;
924 }
925 
allnodes_clear()926 void SelectionDAG::allnodes_clear() {
927   assert(&*AllNodes.begin() == &EntryNode);
928   AllNodes.remove(AllNodes.begin());
929   while (!AllNodes.empty())
930     DeallocateNode(&AllNodes.front());
931 #ifndef NDEBUG
932   NextPersistentId = 0;
933 #endif
934 }
935 
GetBinarySDNode(unsigned Opcode,SDLoc DL,SDVTList VTs,SDValue N1,SDValue N2,const SDNodeFlags * Flags)936 BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
937                                             SDVTList VTs, SDValue N1,
938                                             SDValue N2,
939                                             const SDNodeFlags *Flags) {
940   if (isBinOpWithFlags(Opcode)) {
941     // If no flags were passed in, use a default flags object.
942     SDNodeFlags F;
943     if (Flags == nullptr)
944       Flags = &F;
945 
946     BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
947         Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, *Flags);
948 
949     return FN;
950   }
951 
952   BinarySDNode *N = new (NodeAllocator)
953       BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
954   return N;
955 }
956 
FindNodeOrInsertPos(const FoldingSetNodeID & ID,void * & InsertPos)957 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
958                                           void *&InsertPos) {
959   SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
960   if (N) {
961     switch (N->getOpcode()) {
962     default: break;
963     case ISD::Constant:
964     case ISD::ConstantFP:
965       llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
966                        "debug location.  Use another overload.");
967     }
968   }
969   return N;
970 }
971 
FindNodeOrInsertPos(const FoldingSetNodeID & ID,DebugLoc DL,void * & InsertPos)972 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
973                                           DebugLoc DL, void *&InsertPos) {
974   SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
975   if (N) {
976     switch (N->getOpcode()) {
977     default: break; // Process only regular (non-target) constant nodes.
978     case ISD::Constant:
979     case ISD::ConstantFP:
980       // Erase debug location from the node if the node is used at several
981       // different places to do not propagate one location to all uses as it
982       // leads to incorrect debug info.
983       if (N->getDebugLoc() != DL)
984         N->setDebugLoc(DebugLoc());
985       break;
986     }
987   }
988   return N;
989 }
990 
clear()991 void SelectionDAG::clear() {
992   allnodes_clear();
993   OperandAllocator.Reset();
994   CSEMap.clear();
995 
996   ExtendedValueTypeNodes.clear();
997   ExternalSymbols.clear();
998   TargetExternalSymbols.clear();
999   MCSymbols.clear();
1000   std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1001             static_cast<CondCodeSDNode*>(nullptr));
1002   std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1003             static_cast<SDNode*>(nullptr));
1004 
1005   EntryNode.UseList = nullptr;
1006   InsertNode(&EntryNode);
1007   Root = getEntryNode();
1008   DbgInfo->clear();
1009 }
1010 
getAnyExtOrTrunc(SDValue Op,SDLoc DL,EVT VT)1011 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
1012   return VT.bitsGT(Op.getValueType()) ?
1013     getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1014     getNode(ISD::TRUNCATE, DL, VT, Op);
1015 }
1016 
getSExtOrTrunc(SDValue Op,SDLoc DL,EVT VT)1017 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
1018   return VT.bitsGT(Op.getValueType()) ?
1019     getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1020     getNode(ISD::TRUNCATE, DL, VT, Op);
1021 }
1022 
getZExtOrTrunc(SDValue Op,SDLoc DL,EVT VT)1023 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
1024   return VT.bitsGT(Op.getValueType()) ?
1025     getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1026     getNode(ISD::TRUNCATE, DL, VT, Op);
1027 }
1028 
getBoolExtOrTrunc(SDValue Op,SDLoc SL,EVT VT,EVT OpVT)1029 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
1030                                         EVT OpVT) {
1031   if (VT.bitsLE(Op.getValueType()))
1032     return getNode(ISD::TRUNCATE, SL, VT, Op);
1033 
1034   TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1035   return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1036 }
1037 
getZeroExtendInReg(SDValue Op,SDLoc DL,EVT VT)1038 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
1039   assert(!VT.isVector() &&
1040          "getZeroExtendInReg should use the vector element type instead of "
1041          "the vector type!");
1042   if (Op.getValueType() == VT) return Op;
1043   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1044   APInt Imm = APInt::getLowBitsSet(BitWidth,
1045                                    VT.getSizeInBits());
1046   return getNode(ISD::AND, DL, Op.getValueType(), Op,
1047                  getConstant(Imm, DL, Op.getValueType()));
1048 }
1049 
getAnyExtendVectorInReg(SDValue Op,SDLoc DL,EVT VT)1050 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1051   assert(VT.isVector() && "This DAG node is restricted to vector types.");
1052   assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1053          "The sizes of the input and result must match in order to perform the "
1054          "extend in-register.");
1055   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1056          "The destination vector type must have fewer lanes than the input.");
1057   return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1058 }
1059 
getSignExtendVectorInReg(SDValue Op,SDLoc DL,EVT VT)1060 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1061   assert(VT.isVector() && "This DAG node is restricted to vector types.");
1062   assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1063          "The sizes of the input and result must match in order to perform the "
1064          "extend in-register.");
1065   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1066          "The destination vector type must have fewer lanes than the input.");
1067   return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1068 }
1069 
getZeroExtendVectorInReg(SDValue Op,SDLoc DL,EVT VT)1070 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1071   assert(VT.isVector() && "This DAG node is restricted to vector types.");
1072   assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1073          "The sizes of the input and result must match in order to perform the "
1074          "extend in-register.");
1075   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1076          "The destination vector type must have fewer lanes than the input.");
1077   return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
1078 }
1079 
1080 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1081 ///
getNOT(SDLoc DL,SDValue Val,EVT VT)1082 SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
1083   EVT EltVT = VT.getScalarType();
1084   SDValue NegOne =
1085     getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1086   return getNode(ISD::XOR, DL, VT, Val, NegOne);
1087 }
1088 
getLogicalNOT(SDLoc DL,SDValue Val,EVT VT)1089 SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
1090   EVT EltVT = VT.getScalarType();
1091   SDValue TrueValue;
1092   switch (TLI->getBooleanContents(VT)) {
1093     case TargetLowering::ZeroOrOneBooleanContent:
1094     case TargetLowering::UndefinedBooleanContent:
1095       TrueValue = getConstant(1, DL, VT);
1096       break;
1097     case TargetLowering::ZeroOrNegativeOneBooleanContent:
1098       TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
1099                               VT);
1100       break;
1101   }
1102   return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1103 }
1104 
getConstant(uint64_t Val,SDLoc DL,EVT VT,bool isT,bool isO)1105 SDValue SelectionDAG::getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isT,
1106                                   bool isO) {
1107   EVT EltVT = VT.getScalarType();
1108   assert((EltVT.getSizeInBits() >= 64 ||
1109          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1110          "getConstant with a uint64_t value that doesn't fit in the type!");
1111   return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1112 }
1113 
getConstant(const APInt & Val,SDLoc DL,EVT VT,bool isT,bool isO)1114 SDValue SelectionDAG::getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isT,
1115                                   bool isO)
1116 {
1117   return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1118 }
1119 
getConstant(const ConstantInt & Val,SDLoc DL,EVT VT,bool isT,bool isO)1120 SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
1121                                   bool isT, bool isO) {
1122   assert(VT.isInteger() && "Cannot create FP integer constant!");
1123 
1124   EVT EltVT = VT.getScalarType();
1125   const ConstantInt *Elt = &Val;
1126 
1127   // In some cases the vector type is legal but the element type is illegal and
1128   // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
1129   // inserted value (the type does not need to match the vector element type).
1130   // Any extra bits introduced will be truncated away.
1131   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1132       TargetLowering::TypePromoteInteger) {
1133    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1134    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
1135    Elt = ConstantInt::get(*getContext(), NewVal);
1136   }
1137   // In other cases the element type is illegal and needs to be expanded, for
1138   // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1139   // the value into n parts and use a vector type with n-times the elements.
1140   // Then bitcast to the type requested.
1141   // Legalizing constants too early makes the DAGCombiner's job harder so we
1142   // only legalize if the DAG tells us we must produce legal types.
1143   else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1144            TLI->getTypeAction(*getContext(), EltVT) ==
1145            TargetLowering::TypeExpandInteger) {
1146     APInt NewVal = Elt->getValue();
1147     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1148     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1149     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1150     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1151 
1152     // Check the temporary vector is the correct size. If this fails then
1153     // getTypeToTransformTo() probably returned a type whose size (in bits)
1154     // isn't a power-of-2 factor of the requested type size.
1155     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1156 
1157     SmallVector<SDValue, 2> EltParts;
1158     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1159       EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1160                                            .trunc(ViaEltSizeInBits), DL,
1161                                      ViaEltVT, isT, isO));
1162     }
1163 
1164     // EltParts is currently in little endian order. If we actually want
1165     // big-endian order then reverse it now.
1166     if (getDataLayout().isBigEndian())
1167       std::reverse(EltParts.begin(), EltParts.end());
1168 
1169     // The elements must be reversed when the element order is different
1170     // to the endianness of the elements (because the BITCAST is itself a
1171     // vector shuffle in this situation). However, we do not need any code to
1172     // perform this reversal because getConstant() is producing a vector
1173     // splat.
1174     // This situation occurs in MIPS MSA.
1175 
1176     SmallVector<SDValue, 8> Ops;
1177     for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
1178       Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1179 
1180     SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
1181                              getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
1182                                      Ops));
1183     return Result;
1184   }
1185 
1186   assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1187          "APInt size does not match type size!");
1188   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1189   FoldingSetNodeID ID;
1190   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1191   ID.AddPointer(Elt);
1192   ID.AddBoolean(isO);
1193   void *IP = nullptr;
1194   SDNode *N = nullptr;
1195   if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
1196     if (!VT.isVector())
1197       return SDValue(N, 0);
1198 
1199   if (!N) {
1200     N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, DL.getDebugLoc(),
1201                                            EltVT);
1202     CSEMap.InsertNode(N, IP);
1203     InsertNode(N);
1204   }
1205 
1206   SDValue Result(N, 0);
1207   if (VT.isVector()) {
1208     SmallVector<SDValue, 8> Ops;
1209     Ops.assign(VT.getVectorNumElements(), Result);
1210     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1211   }
1212   return Result;
1213 }
1214 
getIntPtrConstant(uint64_t Val,SDLoc DL,bool isTarget)1215 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget) {
1216   return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1217 }
1218 
getConstantFP(const APFloat & V,SDLoc DL,EVT VT,bool isTarget)1219 SDValue SelectionDAG::getConstantFP(const APFloat& V, SDLoc DL, EVT VT,
1220                                     bool isTarget) {
1221   return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1222 }
1223 
getConstantFP(const ConstantFP & V,SDLoc DL,EVT VT,bool isTarget)1224 SDValue SelectionDAG::getConstantFP(const ConstantFP& V, SDLoc DL, EVT VT,
1225                                     bool isTarget){
1226   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1227 
1228   EVT EltVT = VT.getScalarType();
1229 
1230   // Do the map lookup using the actual bit pattern for the floating point
1231   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1232   // we don't have issues with SNANs.
1233   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1234   FoldingSetNodeID ID;
1235   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1236   ID.AddPointer(&V);
1237   void *IP = nullptr;
1238   SDNode *N = nullptr;
1239   if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
1240     if (!VT.isVector())
1241       return SDValue(N, 0);
1242 
1243   if (!N) {
1244     N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, DL.getDebugLoc(),
1245                                              EltVT);
1246     CSEMap.InsertNode(N, IP);
1247     InsertNode(N);
1248   }
1249 
1250   SDValue Result(N, 0);
1251   if (VT.isVector()) {
1252     SmallVector<SDValue, 8> Ops;
1253     Ops.assign(VT.getVectorNumElements(), Result);
1254     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1255   }
1256   return Result;
1257 }
1258 
getConstantFP(double Val,SDLoc DL,EVT VT,bool isTarget)1259 SDValue SelectionDAG::getConstantFP(double Val, SDLoc DL, EVT VT,
1260                                     bool isTarget) {
1261   EVT EltVT = VT.getScalarType();
1262   if (EltVT==MVT::f32)
1263     return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1264   else if (EltVT==MVT::f64)
1265     return getConstantFP(APFloat(Val), DL, VT, isTarget);
1266   else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
1267            EltVT==MVT::f16) {
1268     bool ignored;
1269     APFloat apf = APFloat(Val);
1270     apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1271                 &ignored);
1272     return getConstantFP(apf, DL, VT, isTarget);
1273   } else
1274     llvm_unreachable("Unsupported type in getConstantFP");
1275 }
1276 
getGlobalAddress(const GlobalValue * GV,SDLoc DL,EVT VT,int64_t Offset,bool isTargetGA,unsigned char TargetFlags)1277 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
1278                                        EVT VT, int64_t Offset,
1279                                        bool isTargetGA,
1280                                        unsigned char TargetFlags) {
1281   assert((TargetFlags == 0 || isTargetGA) &&
1282          "Cannot set target flags on target-independent globals");
1283 
1284   // Truncate (with sign-extension) the offset value to the pointer size.
1285   unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1286   if (BitWidth < 64)
1287     Offset = SignExtend64(Offset, BitWidth);
1288 
1289   unsigned Opc;
1290   if (GV->isThreadLocal())
1291     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1292   else
1293     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1294 
1295   FoldingSetNodeID ID;
1296   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1297   ID.AddPointer(GV);
1298   ID.AddInteger(Offset);
1299   ID.AddInteger(TargetFlags);
1300   ID.AddInteger(GV->getType()->getAddressSpace());
1301   void *IP = nullptr;
1302   if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
1303     return SDValue(E, 0);
1304 
1305   SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
1306                                                       DL.getDebugLoc(), GV, VT,
1307                                                       Offset, TargetFlags);
1308   CSEMap.InsertNode(N, IP);
1309     InsertNode(N);
1310   return SDValue(N, 0);
1311 }
1312 
getFrameIndex(int FI,EVT VT,bool isTarget)1313 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1314   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1315   FoldingSetNodeID ID;
1316   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1317   ID.AddInteger(FI);
1318   void *IP = nullptr;
1319   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1320     return SDValue(E, 0);
1321 
1322   SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
1323   CSEMap.InsertNode(N, IP);
1324   InsertNode(N);
1325   return SDValue(N, 0);
1326 }
1327 
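/// getJumpTable - Return a JumpTable or TargetJumpTable node for the given
/// jump table index; target flags may only be set on target nodes.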
1328 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1329                                    unsigned char TargetFlags) {
1330   assert((TargetFlags == 0 || isTarget) &&
1331          "Cannot set target flags on target-independent jump tables");
1332   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1333   FoldingSetNodeID ID;
1334   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1335   ID.AddInteger(JTI);
1336   ID.AddInteger(TargetFlags);
1337   void *IP = nullptr;
1338   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1339     return SDValue(E, 0);
1340 
1341   SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
1342                                                   TargetFlags);
1343   CSEMap.InsertNode(N, IP);
1344   InsertNode(N);
1345   return SDValue(N, 0);
1346 }
1347 
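/// getConstantPool - Return a (Target)ConstantPool node for constant C. If no
/// alignment is given, the preferred alignment of C's type is used.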
1348 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1349                                       unsigned Alignment, int Offset,
1350                                       bool isTarget,
1351                                       unsigned char TargetFlags) {
1352   assert((TargetFlags == 0 || isTarget) &&
1353          "Cannot set target flags on target-independent globals");
1354   if (Alignment == 0)
1355     Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1356   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1357   FoldingSetNodeID ID;
1358   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1359   ID.AddInteger(Alignment);
1360   ID.AddInteger(Offset);
1361   ID.AddPointer(C);
1362   ID.AddInteger(TargetFlags);
1363   void *IP = nullptr;
1364   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1365     return SDValue(E, 0);
1366 
1367   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
1368                                                      Alignment, TargetFlags);
1369   CSEMap.InsertNode(N, IP);
1370   InsertNode(N);
1371   return SDValue(N, 0);
1372 }
1373 
1374 
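/// getConstantPool - Return a (Target)ConstantPool node for a machine constant
/// pool value, using the value's own addSelectionDAGCSEId to unique the node.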
1375 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1376                                       unsigned Alignment, int Offset,
1377                                       bool isTarget,
1378                                       unsigned char TargetFlags) {
1379   assert((TargetFlags == 0 || isTarget) &&
1380          "Cannot set target flags on target-independent globals");
1381   if (Alignment == 0)
1382     Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1383   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1384   FoldingSetNodeID ID;
1385   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1386   ID.AddInteger(Alignment);
1387   ID.AddInteger(Offset);
1388   C->addSelectionDAGCSEId(ID);
1389   ID.AddInteger(TargetFlags);
1390   void *IP = nullptr;
1391   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1392     return SDValue(E, 0);
1393 
1394   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
1395                                                      Alignment, TargetFlags);
1396   CSEMap.InsertNode(N, IP);
1397   InsertNode(N);
1398   return SDValue(N, 0);
1399 }
1400 
1401 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1402                                      unsigned char TargetFlags) {
1403   FoldingSetNodeID ID;
1404   AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1405   ID.AddInteger(Index);
1406   ID.AddInteger(Offset);
1407   ID.AddInteger(TargetFlags);
1408   void *IP = nullptr;
1409   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1410     return SDValue(E, 0);
1411 
1412   SDNode *N =
1413       new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset, TargetFlags);
1414   CSEMap.InsertNode(N, IP);
1415   InsertNode(N);
1416   return SDValue(N, 0);
1417 }
1418 
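/// getBasicBlock - Return a BasicBlock node wrapping the given
/// MachineBasicBlock, reusing an existing node via CSE when possible.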
1419 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1420   FoldingSetNodeID ID;
1421   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1422   ID.AddPointer(MBB);
1423   void *IP = nullptr;
1424   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1425     return SDValue(E, 0);
1426 
1427   SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
1428   CSEMap.InsertNode(N, IP);
1429   InsertNode(N);
1430   return SDValue(N, 0);
1431 }
1432 
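/// getValueType - Return a VTSDNode carrying the given value type, caching
/// simple types in ValueTypeNodes and extended types in ExtendedValueTypeNodes.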
1433 SDValue SelectionDAG::getValueType(EVT VT) {
1434   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1435       ValueTypeNodes.size())
1436     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1437 
1438   SDNode *&N = VT.isExtended() ?
1439     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1440 
1441   if (N) return SDValue(N, 0);
1442   N = new (NodeAllocator) VTSDNode(VT);
1443   InsertNode(N);
1444   return SDValue(N, 0);
1445 }
1446 
1447 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1448   SDNode *&N = ExternalSymbols[Sym];
1449   if (N) return SDValue(N, 0);
1450   N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
1451   InsertNode(N);
1452   return SDValue(N, 0);
1453 }
1454 
1455 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1456   SDNode *&N = MCSymbols[Sym];
1457   if (N)
1458     return SDValue(N, 0);
1459   N = new (NodeAllocator) MCSymbolSDNode(Sym, VT);
1460   InsertNode(N);
1461   return SDValue(N, 0);
1462 }
1463 
1464 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1465                                               unsigned char TargetFlags) {
1466   SDNode *&N =
1467     TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1468                                                                TargetFlags)];
1469   if (N) return SDValue(N, 0);
1470   N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
1471   InsertNode(N);
1472   return SDValue(N, 0);
1473 }
1474 
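/// getCondCode - Return the singleton CondCodeSDNode for the given condition
/// code, creating and caching it on first use.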
1475 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1476   if ((unsigned)Cond >= CondCodeNodes.size())
1477     CondCodeNodes.resize(Cond+1);
1478 
1479   if (!CondCodeNodes[Cond]) {
1480     CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
1481     CondCodeNodes[Cond] = N;
1482     InsertNode(N);
1483   }
1484 
1485   return SDValue(CondCodeNodes[Cond], 0);
1486 }
1487 
1488 // commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
1489 // the shuffle mask M that point at N1 to point at N2, and indices that point
1490 // at N2 to point at N1.
1491 static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
1492   std::swap(N1, N2);
1493   ShuffleVectorSDNode::commuteMask(M);
1494 }
1495 
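/// getVectorShuffle - Return a VECTOR_SHUFFLE of N1 and N2 with the given
/// mask, canonicalizing the operands and mask and folding trivial shuffles
/// (undef inputs, identity masks, and shuffles of splats) before creating a
/// new node.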
1496 SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
1497                                        SDValue N2, const int *Mask) {
1498   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1499          "Invalid VECTOR_SHUFFLE");
1500 
1501   // Canonicalize shuffle undef, undef -> undef
1502   if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
1503     return getUNDEF(VT);
1504 
1505   // Validate that all indices in Mask are within the range of the elements
1506   // input to the shuffle.
1507   unsigned NElts = VT.getVectorNumElements();
1508   SmallVector<int, 8> MaskVec;
1509   for (unsigned i = 0; i != NElts; ++i) {
1510     assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
1511     MaskVec.push_back(Mask[i]);
1512   }
1513 
1514   // Canonicalize shuffle v, v -> v, undef
1515   if (N1 == N2) {
1516     N2 = getUNDEF(VT);
1517     for (unsigned i = 0; i != NElts; ++i)
1518       if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
1519   }
1520 
1521   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
1522   if (N1.getOpcode() == ISD::UNDEF)
1523     commuteShuffle(N1, N2, MaskVec);
1524 
1525   // If shuffling a splat, try to blend the splat instead. We do this here so
1526   // that even when this arises during lowering we don't have to re-handle it.
1527   auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1528     BitVector UndefElements;
1529     SDValue Splat = BV->getSplatValue(&UndefElements);
1530     if (!Splat)
1531       return;
1532 
1533     for (int i = 0; i < (int)NElts; ++i) {
1534       if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + (int)NElts))
1535         continue;
1536 
1537       // If this input comes from undef, mark it as such.
1538       if (UndefElements[MaskVec[i] - Offset]) {
1539         MaskVec[i] = -1;
1540         continue;
1541       }
1542 
1543       // If we can blend a non-undef lane, use that instead.
1544       if (!UndefElements[i])
1545         MaskVec[i] = i + Offset;
1546     }
1547   };
1548   if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1549     BlendSplat(N1BV, 0);
1550   if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1551     BlendSplat(N2BV, NElts);
1552 
1553   // Canonicalize: if all indices point into the lhs, -> shuffle lhs, undef.
1554   // Likewise, if all indices point into the rhs, -> shuffle rhs, undef.
1555   bool AllLHS = true, AllRHS = true;
1556   bool N2Undef = N2.getOpcode() == ISD::UNDEF;
1557   for (unsigned i = 0; i != NElts; ++i) {
1558     if (MaskVec[i] >= (int)NElts) {
1559       if (N2Undef)
1560         MaskVec[i] = -1;
1561       else
1562         AllLHS = false;
1563     } else if (MaskVec[i] >= 0) {
1564       AllRHS = false;
1565     }
1566   }
1567   if (AllLHS && AllRHS)
1568     return getUNDEF(VT);
1569   if (AllLHS && !N2Undef)
1570     N2 = getUNDEF(VT);
1571   if (AllRHS) {
1572     N1 = getUNDEF(VT);
1573     commuteShuffle(N1, N2, MaskVec);
1574   }
1575   // Reset our undef status after accounting for the mask.
1576   N2Undef = N2.getOpcode() == ISD::UNDEF;
1577   // Re-check whether both sides ended up undef.
1578   if (N1.getOpcode() == ISD::UNDEF && N2Undef)
1579     return getUNDEF(VT);
1580 
1581   // If this is an identity shuffle, return the lhs.
1582   bool Identity = true, AllSame = true;
1583   for (unsigned i = 0; i != NElts; ++i) {
1584     if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
1585     if (MaskVec[i] != MaskVec[0]) AllSame = false;
1586   }
1587   if (Identity && NElts)
1588     return N1;
1589 
1590   // Shuffling a constant splat doesn't change the result.
1591   if (N2Undef) {
1592     SDValue V = N1;
1593 
1594     // Look through any bitcasts. We check that these don't change the number
1595     // (and size) of elements and just change their types.
1596     while (V.getOpcode() == ISD::BITCAST)
1597       V = V->getOperand(0);
1598 
1599     // A splat should always show up as a build vector node.
1600     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1601       BitVector UndefElements;
1602       SDValue Splat = BV->getSplatValue(&UndefElements);
1603       // If this is a splat of an undef, shuffling it is also undef.
1604       if (Splat && Splat.getOpcode() == ISD::UNDEF)
1605         return getUNDEF(VT);
1606 
1607       bool SameNumElts =
1608           V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1609 
1610       // We only have a splat which can skip shuffles if there is a splatted
1611       // value and no undef lanes rearranged by the shuffle.
1612       if (Splat && UndefElements.none()) {
1613         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1614         // number of elements match or the value splatted is a zero constant.
1615         if (SameNumElts)
1616           return N1;
1617         if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1618           if (C->isNullValue())
1619             return N1;
1620       }
1621 
1622       // If the shuffle itself creates a splat, build the vector directly.
1623       if (AllSame && SameNumElts) {
1624         const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1625         SmallVector<SDValue, 8> Ops(NElts, Splatted);
1626 
1627         EVT BuildVT = BV->getValueType(0);
1628         SDValue NewBV = getNode(ISD::BUILD_VECTOR, dl, BuildVT, Ops);
1629 
1630         // We may have jumped through bitcasts, so the type of the
1631         // BUILD_VECTOR may not match the type of the shuffle.
1632         if (BuildVT != VT)
1633           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1634         return NewBV;
1635       }
1636     }
1637   }
1638 
1639   FoldingSetNodeID ID;
1640   SDValue Ops[2] = { N1, N2 };
1641   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1642   for (unsigned i = 0; i != NElts; ++i)
1643     ID.AddInteger(MaskVec[i]);
1644 
1645   void* IP = nullptr;
1646   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
1647     return SDValue(E, 0);
1648 
1649   // Allocate the mask array for the node out of the BumpPtrAllocator, since
1650   // SDNode doesn't have access to it.  This memory will be "leaked" when
1651   // the node is deallocated, but recovered when the NodeAllocator is released.
1652   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1653   memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
1654 
1655   ShuffleVectorSDNode *N =
1656     new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
1657                                             dl.getDebugLoc(), N1, N2,
1658                                             MaskAlloc);
1659   CSEMap.InsertNode(N, IP);
1660   InsertNode(N);
1661   return SDValue(N, 0);
1662 }
1663 
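/// getCommutedVectorShuffle - Return the same shuffle as SV but with its two
/// operands swapped and the mask commuted to match.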
1664 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1665   MVT VT = SV.getSimpleValueType(0);
1666   SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1667   ShuffleVectorSDNode::commuteMask(MaskVec);
1668 
1669   SDValue Op0 = SV.getOperand(0);
1670   SDValue Op1 = SV.getOperand(1);
1671   return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]);
1672 }
1673 
1674 SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
1675                                        SDValue Val, SDValue DTy,
1676                                        SDValue STy, SDValue Rnd, SDValue Sat,
1677                                        ISD::CvtCode Code) {
1678   // If the src and dest types are the same and the conversion is between
1679   // integer types of the same sign or two floats, no conversion is necessary.
1680   if (DTy == STy &&
1681       (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
1682     return Val;
1683 
1684   FoldingSetNodeID ID;
1685   SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
1686   AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
1687   void* IP = nullptr;
1688   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
1689     return SDValue(E, 0);
1690 
1691   CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
1692                                                            dl.getDebugLoc(),
1693                                                            Ops, Code);
1694   CSEMap.InsertNode(N, IP);
1695   InsertNode(N);
1696   return SDValue(N, 0);
1697 }
1698 
1699 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1700   FoldingSetNodeID ID;
1701   AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1702   ID.AddInteger(RegNo);
1703   void *IP = nullptr;
1704   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1705     return SDValue(E, 0);
1706 
1707   SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
1708   CSEMap.InsertNode(N, IP);
1709   InsertNode(N);
1710   return SDValue(N, 0);
1711 }
1712 
1713 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1714   FoldingSetNodeID ID;
1715   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1716   ID.AddPointer(RegMask);
1717   void *IP = nullptr;
1718   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1719     return SDValue(E, 0);
1720 
1721   SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
1722   CSEMap.InsertNode(N, IP);
1723   InsertNode(N);
1724   return SDValue(N, 0);
1725 }
1726 
1727 SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
1728   FoldingSetNodeID ID;
1729   SDValue Ops[] = { Root };
1730   AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
1731   ID.AddPointer(Label);
1732   void *IP = nullptr;
1733   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1734     return SDValue(E, 0);
1735 
1736   SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
1737                                                 dl.getDebugLoc(), Root, Label);
1738   CSEMap.InsertNode(N, IP);
1739   InsertNode(N);
1740   return SDValue(N, 0);
1741 }
1742 
1743 
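/// getBlockAddress - Return a (Target)BlockAddress node for BA at the given
/// offset, reusing an existing node via CSE when possible.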
1744 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1745                                       int64_t Offset,
1746                                       bool isTarget,
1747                                       unsigned char TargetFlags) {
1748   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1749 
1750   FoldingSetNodeID ID;
1751   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1752   ID.AddPointer(BA);
1753   ID.AddInteger(Offset);
1754   ID.AddInteger(TargetFlags);
1755   void *IP = nullptr;
1756   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1757     return SDValue(E, 0);
1758 
1759   SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
1760                                                      TargetFlags);
1761   CSEMap.InsertNode(N, IP);
1762   InsertNode(N);
1763   return SDValue(N, 0);
1764 }
1765 
1766 SDValue SelectionDAG::getSrcValue(const Value *V) {
1767   assert((!V || V->getType()->isPointerTy()) &&
1768          "SrcValue is not a pointer?");
1769 
1770   FoldingSetNodeID ID;
1771   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1772   ID.AddPointer(V);
1773 
1774   void *IP = nullptr;
1775   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1776     return SDValue(E, 0);
1777 
1778   SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
1779   CSEMap.InsertNode(N, IP);
1780   InsertNode(N);
1781   return SDValue(N, 0);
1782 }
1783 
1784 /// getMDNode - Return an MDNodeSDNode which holds an MDNode.
1785 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1786   FoldingSetNodeID ID;
1787   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1788   ID.AddPointer(MD);
1789 
1790   void *IP = nullptr;
1791   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1792     return SDValue(E, 0);
1793 
1794   SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
1795   CSEMap.InsertNode(N, IP);
1796   InsertNode(N);
1797   return SDValue(N, 0);
1798 }
1799 
1800 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1801   if (VT == V.getValueType())
1802     return V;
1803 
1804   return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1805 }
1806 
1807 /// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
1808 SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
1809                                        unsigned SrcAS, unsigned DestAS) {
1810   SDValue Ops[] = {Ptr};
1811   FoldingSetNodeID ID;
1812   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1813   ID.AddInteger(SrcAS);
1814   ID.AddInteger(DestAS);
1815 
1816   void *IP = nullptr;
1817   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
1818     return SDValue(E, 0);
1819 
1820   SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
1821                                                       dl.getDebugLoc(),
1822                                                       VT, Ptr, SrcAS, DestAS);
1823   CSEMap.InsertNode(N, IP);
1824   InsertNode(N);
1825   return SDValue(N, 0);
1826 }
1827 
1828 /// getShiftAmountOperand - Return the specified value cast to
1829 /// the target's desired shift amount type.
1830 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1831   EVT OpTy = Op.getValueType();
1832   EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1833   if (OpTy == ShTy || OpTy.isVector()) return Op;
1834 
1835   return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1836 }
1837 
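/// expandVAArg - Expand a VAARG node into explicit loads and stores: load the
/// va_list pointer, align it if required, bump it past the argument, store it
/// back, and load the argument value itself.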
1838 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1839   SDLoc dl(Node);
1840   const TargetLowering &TLI = getTargetLoweringInfo();
1841   const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1842   EVT VT = Node->getValueType(0);
1843   SDValue Tmp1 = Node->getOperand(0);
1844   SDValue Tmp2 = Node->getOperand(1);
1845   unsigned Align = Node->getConstantOperandVal(3);
1846 
1847   SDValue VAListLoad =
1848     getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, Tmp2,
1849             MachinePointerInfo(V), false, false, false, 0);
1850   SDValue VAList = VAListLoad;
1851 
1852   if (Align > TLI.getMinStackArgumentAlignment()) {
1853     assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1854 
1855     VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1856                      getConstant(Align - 1, dl, VAList.getValueType()));
1857 
1858     VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1859                      getConstant(-(int64_t)Align, dl, VAList.getValueType()));
1860   }
1861 
1862   // Increment the pointer, VAList, to the next vaarg
1863   Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1864                  getConstant(getDataLayout().getTypeAllocSize(
1865                                                VT.getTypeForEVT(*getContext())),
1866                              dl, VAList.getValueType()));
1867   // Store the incremented VAList to the legalized pointer
1868   Tmp1 = getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2,
1869                   MachinePointerInfo(V), false, false, 0);
1870   // Load the actual argument out of the pointer VAList
1871   return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo(),
1872                  false, false, false, 0);
1873 }
1874 
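/// expandVACopy - Expand a VACOPY node by loading the source va_list pointer
/// and storing it to the destination, returning the store's chain.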
1875 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1876   SDLoc dl(Node);
1877   const TargetLowering &TLI = getTargetLoweringInfo();
1878   // This defaults to loading a pointer from the input and storing it to the
1879   // output, returning the chain.
1880   const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1881   const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1882   SDValue Tmp1 = getLoad(TLI.getPointerTy(getDataLayout()), dl,
1883                          Node->getOperand(0), Node->getOperand(2),
1884                          MachinePointerInfo(VS), false, false, false, 0);
1885   return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1886                   MachinePointerInfo(VD), false, false, 0);
1887 }
1888 
1889 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
1890 /// specified value type.
1891 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1892   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1893   unsigned ByteSize = VT.getStoreSize();
1894   Type *Ty = VT.getTypeForEVT(*getContext());
1895   unsigned StackAlign =
1896       std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1897 
1898   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
1899   return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1900 }
1901 
1902 /// CreateStackTemporary - Create a stack temporary suitable for holding
1903 /// either of the specified value types.
1904 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1905   unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1906   Type *Ty1 = VT1.getTypeForEVT(*getContext());
1907   Type *Ty2 = VT2.getTypeForEVT(*getContext());
1908   const DataLayout &DL = getDataLayout();
1909   unsigned Align =
1910       std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1911 
1912   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1913   int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
1914   return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1915 }
1916 
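/// FoldSetCC - Try to constant-fold a setcc of N1 and N2 with condition Cond,
/// returning a constant, an undef, or a canonicalized setcc; returns an empty
/// SDValue if nothing folds.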
1917 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
1918                                 SDValue N2, ISD::CondCode Cond, SDLoc dl) {
1919   // These setcc operations always fold.
1920   switch (Cond) {
1921   default: break;
1922   case ISD::SETFALSE:
1923   case ISD::SETFALSE2: return getConstant(0, dl, VT);
1924   case ISD::SETTRUE:
1925   case ISD::SETTRUE2: {
1926     TargetLowering::BooleanContent Cnt =
1927         TLI->getBooleanContents(N1->getValueType(0));
1928     return getConstant(
1929         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1930         VT);
1931   }
1932 
1933   case ISD::SETOEQ:
1934   case ISD::SETOGT:
1935   case ISD::SETOGE:
1936   case ISD::SETOLT:
1937   case ISD::SETOLE:
1938   case ISD::SETONE:
1939   case ISD::SETO:
1940   case ISD::SETUO:
1941   case ISD::SETUEQ:
1942   case ISD::SETUNE:
1943     assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1944     break;
1945   }
1946 
1947   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1948     const APInt &C2 = N2C->getAPIntValue();
1949     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1950       const APInt &C1 = N1C->getAPIntValue();
1951 
1952       switch (Cond) {
1953       default: llvm_unreachable("Unknown integer setcc!");
1954       case ISD::SETEQ:  return getConstant(C1 == C2, dl, VT);
1955       case ISD::SETNE:  return getConstant(C1 != C2, dl, VT);
1956       case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
1957       case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
1958       case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
1959       case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
1960       case ISD::SETLT:  return getConstant(C1.slt(C2), dl, VT);
1961       case ISD::SETGT:  return getConstant(C1.sgt(C2), dl, VT);
1962       case ISD::SETLE:  return getConstant(C1.sle(C2), dl, VT);
1963       case ISD::SETGE:  return getConstant(C1.sge(C2), dl, VT);
1964       }
1965     }
1966   }
1967   if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
1968     if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
1969       APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1970       switch (Cond) {
1971       default: break;
1972       case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
1973                           return getUNDEF(VT);
1974                         // fall through
1975       case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
1976       case ISD::SETNE:  if (R==APFloat::cmpUnordered)
1977                           return getUNDEF(VT);
1978                         // fall through
1979       case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1980                                            R==APFloat::cmpLessThan, dl, VT);
1981       case ISD::SETLT:  if (R==APFloat::cmpUnordered)
1982                           return getUNDEF(VT);
1983                         // fall through
1984       case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
1985       case ISD::SETGT:  if (R==APFloat::cmpUnordered)
1986                           return getUNDEF(VT);
1987                         // fall through
1988       case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
1989       case ISD::SETLE:  if (R==APFloat::cmpUnordered)
1990                           return getUNDEF(VT);
1991                         // fall through
1992       case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1993                                            R==APFloat::cmpEqual, dl, VT);
1994       case ISD::SETGE:  if (R==APFloat::cmpUnordered)
1995                           return getUNDEF(VT);
1996                         // fall through
1997       case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1998                                            R==APFloat::cmpEqual, dl, VT);
1999       case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, dl, VT);
2000       case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, dl, VT);
2001       case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
2002                                            R==APFloat::cmpEqual, dl, VT);
2003       case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
2004       case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
2005                                            R==APFloat::cmpLessThan, dl, VT);
2006       case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
2007                                            R==APFloat::cmpUnordered, dl, VT);
2008       case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
2009       case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
2010       }
2011     } else {
2012       // Ensure that the constant occurs on the RHS.
2013       ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2014       MVT CompVT = N1.getValueType().getSimpleVT();
2015       if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
2016         return SDValue();
2017 
2018       return getSetCC(dl, VT, N2, N1, SwappedCond);
2019     }
2020   }
2021 
2022   // Could not fold it.
2023   return SDValue();
2024 }
2025 
2026 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
2027 /// use this predicate to simplify operations downstream.
2028 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2029   // This predicate is not safe for vector operations.
2030   if (Op.getValueType().isVector())
2031     return false;
2032 
2033   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
2034   return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
2035 }
2036 
2037 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
2038 /// this predicate to simplify operations downstream.  Mask is known to be zero
2039 /// for bits that V cannot have.
2040 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
2041                                      unsigned Depth) const {
2042   APInt KnownZero, KnownOne;
2043   computeKnownBits(Op, KnownZero, KnownOne, Depth);
2044   return (KnownZero & Mask) == Mask;
2045 }
2046 
2047 /// Determine which bits of Op are known to be either zero or one and return
2048 /// them in the KnownZero/KnownOne bitsets.
2049 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
2050                                     APInt &KnownOne, unsigned Depth) const {
2051   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
2052 
2053   KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
2054   if (Depth == 6)
2055     return;  // Limit search depth.
2056 
2057   APInt KnownZero2, KnownOne2;
2058 
2059   switch (Op.getOpcode()) {
2060   case ISD::Constant:
2061     // We know all of the bits for a constant!
2062     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
2063     KnownZero = ~KnownOne;
2064     break;
2065   case ISD::AND:
2066     // If either the LHS or the RHS are Zero, the result is zero.
2067     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2068     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2069 
2070     // Output known-1 bits are only known if set in both the LHS & RHS.
2071     KnownOne &= KnownOne2;
2072     // Output known-0 are known to be clear if zero in either the LHS | RHS.
2073     KnownZero |= KnownZero2;
2074     break;
2075   case ISD::OR:
2076     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2077     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2078 
2079     // Output known-0 bits are only known if clear in both the LHS & RHS.
2080     KnownZero &= KnownZero2;
2081     // Output known-1 are known to be set if set in either the LHS | RHS.
2082     KnownOne |= KnownOne2;
2083     break;
2084   case ISD::XOR: {
2085     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2086     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2087 
2088     // Output known-0 bits are known if clear or set in both the LHS & RHS.
2089     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
2090     // Output known-1 are known to be set if set in only one of the LHS, RHS.
2091     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
2092     KnownZero = KnownZeroOut;
2093     break;
2094   }
2095   case ISD::MUL: {
2096     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2097     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2098 
2099     // If low bits are zero in either operand, output low known-0 bits.
2100     // Also compute a conservative estimate for high known-0 bits.
2101     // More trickiness is possible, but this is sufficient for the
2102     // interesting case of alignment computation.
2103     KnownOne.clearAllBits();
2104     unsigned TrailZ = KnownZero.countTrailingOnes() +
2105                       KnownZero2.countTrailingOnes();
2106     unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
2107                                KnownZero2.countLeadingOnes(),
2108                                BitWidth) - BitWidth;
2109 
2110     TrailZ = std::min(TrailZ, BitWidth);
2111     LeadZ = std::min(LeadZ, BitWidth);
2112     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
2113                 APInt::getHighBitsSet(BitWidth, LeadZ);
2114     break;
2115   }
2116   case ISD::UDIV: {
2117     // For the purposes of computing leading zeros we can conservatively
2118     // treat a udiv as a logical right shift by the power of 2 known to
2119     // be less than the denominator.
2120     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2121     unsigned LeadZ = KnownZero2.countLeadingOnes();
2122 
2123     KnownOne2.clearAllBits();
2124     KnownZero2.clearAllBits();
2125     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2126     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
2127     if (RHSUnknownLeadingOnes != BitWidth)
2128       LeadZ = std::min(BitWidth,
2129                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
2130 
2131     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
2132     break;
2133   }
2134   case ISD::SELECT:
2135     computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
2136     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2137 
2138     // Only known if known in both the LHS and RHS.
2139     KnownOne &= KnownOne2;
2140     KnownZero &= KnownZero2;
2141     break;
2142   case ISD::SELECT_CC:
2143     computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
2144     computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
2145 
2146     // Only known if known in both the LHS and RHS.
2147     KnownOne &= KnownOne2;
2148     KnownZero &= KnownZero2;
2149     break;
2150   case ISD::SADDO:
2151   case ISD::UADDO:
2152   case ISD::SSUBO:
2153   case ISD::USUBO:
2154   case ISD::SMULO:
2155   case ISD::UMULO:
2156     if (Op.getResNo() != 1)
2157       break;
2158     // The boolean result conforms to getBooleanContents.
2159     // If we know the result of a setcc has the top bits zero, use this info.
2160     // We know that we have an integer-based boolean since these operations
2161     // are only available for integer types.
2162     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2163             TargetLowering::ZeroOrOneBooleanContent &&
2164         BitWidth > 1)
2165       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2166     break;
2167   case ISD::SETCC:
2168     // If we know the result of a setcc has the top bits zero, use this info.
2169     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2170             TargetLowering::ZeroOrOneBooleanContent &&
2171         BitWidth > 1)
2172       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2173     break;
2174   case ISD::SHL:
2175     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
2176     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2177       unsigned ShAmt = SA->getZExtValue();
2178 
2179       // If the shift count is an invalid immediate, don't do anything.
2180       if (ShAmt >= BitWidth)
2181         break;
2182 
2183       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2184       KnownZero <<= ShAmt;
2185       KnownOne  <<= ShAmt;
2186       // low bits known zero.
2187       KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
2188     }
2189     break;
2190   case ISD::SRL:
2191     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
2192     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2193       unsigned ShAmt = SA->getZExtValue();
2194 
2195       // If the shift count is an invalid immediate, don't do anything.
2196       if (ShAmt >= BitWidth)
2197         break;
2198 
2199       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2200       KnownZero = KnownZero.lshr(ShAmt);
2201       KnownOne  = KnownOne.lshr(ShAmt);
2202 
2203       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2204       KnownZero |= HighBits;  // High bits known zero.
2205     }
2206     break;
2207   case ISD::SRA:
2208     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2209       unsigned ShAmt = SA->getZExtValue();
2210 
2211       // If the shift count is an invalid immediate, don't do anything.
2212       if (ShAmt >= BitWidth)
2213         break;
2214 
2215       // If any of the demanded bits are produced by the sign extension, we also
2216       // demand the input sign bit.
2217       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2218 
2219       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2220       KnownZero = KnownZero.lshr(ShAmt);
2221       KnownOne  = KnownOne.lshr(ShAmt);
2222 
2223       // Handle the sign bits.
2224       APInt SignBit = APInt::getSignBit(BitWidth);
2225       SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.
2226 
2227       if (KnownZero.intersects(SignBit)) {
2228         KnownZero |= HighBits;  // New bits are known zero.
2229       } else if (KnownOne.intersects(SignBit)) {
2230         KnownOne  |= HighBits;  // New bits are known one.
2231       }
2232     }
2233     break;
2234   case ISD::SIGN_EXTEND_INREG: {
2235     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2236     unsigned EBits = EVT.getScalarType().getSizeInBits();
2237 
2238     // Sign extension.  Compute the demanded bits in the result that are not
2239     // present in the input.
2240     APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2241 
2242     APInt InSignBit = APInt::getSignBit(EBits);
2243     APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2244 
2245     // If the sign extended bits are demanded, we know that the sign
2246     // bit is demanded.
2247     InSignBit = InSignBit.zext(BitWidth);
2248     if (NewBits.getBoolValue())
2249       InputDemandedBits |= InSignBit;
2250 
2251     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2252     KnownOne &= InputDemandedBits;
2253     KnownZero &= InputDemandedBits;
2254 
2255     // If the sign bit of the input is known set or clear, then we know the
2256     // top bits of the result.
2257     if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
2258       KnownZero |= NewBits;
2259       KnownOne  &= ~NewBits;
2260     } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
2261       KnownOne  |= NewBits;
2262       KnownZero &= ~NewBits;
2263     } else {                              // Input sign bit unknown
2264       KnownZero &= ~NewBits;
2265       KnownOne  &= ~NewBits;
2266     }
2267     break;
2268   }
2269   case ISD::CTTZ:
2270   case ISD::CTTZ_ZERO_UNDEF:
2271   case ISD::CTLZ:
2272   case ISD::CTLZ_ZERO_UNDEF:
2273   case ISD::CTPOP: {
2274     unsigned LowBits = Log2_32(BitWidth)+1;
2275     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
2276     KnownOne.clearAllBits();
2277     break;
2278   }
2279   case ISD::LOAD: {
2280     LoadSDNode *LD = cast<LoadSDNode>(Op);
2281     // If this is a ZEXTLoad and we are looking at the loaded value.
2282     if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2283       EVT VT = LD->getMemoryVT();
2284       unsigned MemBits = VT.getScalarType().getSizeInBits();
2285       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2286     } else if (const MDNode *Ranges = LD->getRanges()) {
2287       if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2288         computeKnownBitsFromRangeMetadata(*Ranges, KnownZero, KnownOne);
2289     }
2290     break;
2291   }
2292   case ISD::ZERO_EXTEND: {
2293     EVT InVT = Op.getOperand(0).getValueType();
2294     unsigned InBits = InVT.getScalarType().getSizeInBits();
2295     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2296     KnownZero = KnownZero.trunc(InBits);
2297     KnownOne = KnownOne.trunc(InBits);
2298     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2299     KnownZero = KnownZero.zext(BitWidth);
2300     KnownOne = KnownOne.zext(BitWidth);
2301     KnownZero |= NewBits;
2302     break;
2303   }
2304   case ISD::SIGN_EXTEND: {
2305     EVT InVT = Op.getOperand(0).getValueType();
2306     unsigned InBits = InVT.getScalarType().getSizeInBits();
2307     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2308 
2309     KnownZero = KnownZero.trunc(InBits);
2310     KnownOne = KnownOne.trunc(InBits);
2311     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2312 
2313     // Note if the sign bit is known to be zero or one.
2314     bool SignBitKnownZero = KnownZero.isNegative();
2315     bool SignBitKnownOne  = KnownOne.isNegative();
2316 
2317     KnownZero = KnownZero.zext(BitWidth);
2318     KnownOne = KnownOne.zext(BitWidth);
2319 
2320     // If the sign bit is known zero or one, the top bits match.
2321     if (SignBitKnownZero)
2322       KnownZero |= NewBits;
2323     else if (SignBitKnownOne)
2324       KnownOne  |= NewBits;
2325     break;
2326   }
2327   case ISD::ANY_EXTEND: {
2328     EVT InVT = Op.getOperand(0).getValueType();
2329     unsigned InBits = InVT.getScalarType().getSizeInBits();
2330     KnownZero = KnownZero.trunc(InBits);
2331     KnownOne = KnownOne.trunc(InBits);
2332     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2333     KnownZero = KnownZero.zext(BitWidth);
2334     KnownOne = KnownOne.zext(BitWidth);
2335     break;
2336   }
2337   case ISD::TRUNCATE: {
2338     EVT InVT = Op.getOperand(0).getValueType();
2339     unsigned InBits = InVT.getScalarType().getSizeInBits();
2340     KnownZero = KnownZero.zext(InBits);
2341     KnownOne = KnownOne.zext(InBits);
2342     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2343     KnownZero = KnownZero.trunc(BitWidth);
2344     KnownOne = KnownOne.trunc(BitWidth);
2345     break;
2346   }
2347   case ISD::AssertZext: {
2348     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2349     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2350     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2351     KnownZero |= (~InMask);
2352     KnownOne  &= (~KnownZero);
2353     break;
2354   }
2355   case ISD::FGETSIGN:
2356     // All bits are zero except the low bit.
2357     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2358     break;
2359 
2360   case ISD::SUB: {
2361     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2362       // We know that the top bits of C-X are clear if X contains fewer bits
2363       // than C (i.e. no wrap-around can happen).  For example, 20-X is
2364       // positive if we can prove that X is >= 0 and < 16.
2365       if (CLHS->getAPIntValue().isNonNegative()) {
2366         unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2367         // NLZ can't be BitWidth with no sign bit
2368         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2369         computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2370 
2371         // If all of the MaskV bits are known to be zero, then we know the
2372         // output top bits are zero, because we now know that the output is
2373         // from [0-C].
2374         if ((KnownZero2 & MaskV) == MaskV) {
2375           unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2376           // Top bits known zero.
2377           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2378         }
2379       }
2380     }
2381   }
2382   // fall through
2383   case ISD::ADD:
2384   case ISD::ADDE: {
2385     // Output known-0 bits are known if clear or set in both the low clear bits
2386     // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
2387     // low 3 bits clear.
2388     // Output known-0 bits are also known if the top bits of each input are
2389     // known to be clear. For example, if one input has the top 10 bits clear
2390     // and the other has the top 8 bits clear, we know the top 7 bits of the
2391     // output must be clear.
2392     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2393     unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
2394     unsigned KnownZeroLow = KnownZero2.countTrailingOnes();
2395 
2396     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2397     KnownZeroHigh = std::min(KnownZeroHigh,
2398                              KnownZero2.countLeadingOnes());
2399     KnownZeroLow = std::min(KnownZeroLow,
2400                             KnownZero2.countTrailingOnes());
2401 
2402     if (Op.getOpcode() == ISD::ADD) {
2403       KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
2404       if (KnownZeroHigh > 1)
2405         KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
2406       break;
2407     }
2408 
2409     // With ADDE, a carry bit may be added in, so we can only use this
2410     // information if we know (at least) that the low two bits are clear.  We
2411     // then return to the caller that the low bit is unknown but that other bits
2412     // are known zero.
2413     if (KnownZeroLow >= 2) // ADDE
2414       KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
2415     break;
2416   }
2417   case ISD::SREM:
2418     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2419       const APInt &RA = Rem->getAPIntValue().abs();
2420       if (RA.isPowerOf2()) {
2421         APInt LowBits = RA - 1;
2422         computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2423 
2424         // The low bits of the first operand are unchanged by the srem.
2425         KnownZero = KnownZero2 & LowBits;
2426         KnownOne = KnownOne2 & LowBits;
2427 
2428         // If the first operand is non-negative or has all low bits zero, then
2429         // the upper bits are all zero.
2430         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2431           KnownZero |= ~LowBits;
2432 
2433         // If the first operand is negative and not all low bits are zero, then
2434         // the upper bits are all one.
2435         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2436           KnownOne |= ~LowBits;
2437         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2438       }
2439     }
2440     break;
2441   case ISD::UREM: {
2442     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2443       const APInt &RA = Rem->getAPIntValue();
2444       if (RA.isPowerOf2()) {
2445         APInt LowBits = (RA - 1);
2446         computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
2447 
2448         // The upper bits are all zero, the lower ones are unchanged.
2449         KnownZero = KnownZero2 | ~LowBits;
2450         KnownOne = KnownOne2 & LowBits;
2451         break;
2452       }
2453     }
2454 
2455     // Since the result is less than or equal to either operand, any leading
2456     // zero bits in either operand must also exist in the result.
2457     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2458     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2459 
2460     uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2461                                 KnownZero2.countLeadingOnes());
2462     KnownOne.clearAllBits();
2463     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2464     break;
2465   }
2466   case ISD::EXTRACT_ELEMENT: {
2467     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2468     const unsigned Index =
2469       cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2470     const unsigned BitWidth = Op.getValueType().getSizeInBits();
2471 
2472     // Remove low part of known bits mask
2473     KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
2474     KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);
2475 
2476     // Remove high part of known bit mask
2477     KnownZero = KnownZero.trunc(BitWidth);
2478     KnownOne = KnownOne.trunc(BitWidth);
2479     break;
2480   }
2481   case ISD::SMIN:
2482   case ISD::SMAX:
2483   case ISD::UMIN:
2484   case ISD::UMAX: {
2485     APInt Op0Zero, Op0One;
2486     APInt Op1Zero, Op1One;
2487     computeKnownBits(Op.getOperand(0), Op0Zero, Op0One, Depth);
2488     computeKnownBits(Op.getOperand(1), Op1Zero, Op1One, Depth);
2489 
2490     KnownZero = Op0Zero & Op1Zero;
2491     KnownOne = Op0One & Op1One;
2492     break;
2493   }
2494   case ISD::FrameIndex:
2495   case ISD::TargetFrameIndex:
2496     if (unsigned Align = InferPtrAlignment(Op)) {
2497       // The low bits are known zero if the pointer is aligned.
2498       KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2499       break;
2500     }
2501     break;
2502 
2503   default:
2504     if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2505       break;
2506     // Fallthrough
2507   case ISD::INTRINSIC_WO_CHAIN:
2508   case ISD::INTRINSIC_W_CHAIN:
2509   case ISD::INTRINSIC_VOID:
2510     // Allow the target to implement this method for its nodes.
2511     TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2512     break;
2513   }
2514 
2515   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2516 }
2517 
2518 /// ComputeNumSignBits - Return the number of times the sign bit of the
2519 /// register is replicated into the other bits.  We know that at least 1 bit
2520 /// is always equal to the sign bit (itself), but other cases can give us
2521 /// information.  For example, immediately after an "SRA X, 2", we know that
2522 /// the top 3 bits are all equal to each other, so we return 3.
2523 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
2524   EVT VT = Op.getValueType();
2525   assert(VT.isInteger() && "Invalid VT!");
2526   unsigned VTBits = VT.getScalarType().getSizeInBits();
2527   unsigned Tmp, Tmp2;
2528   unsigned FirstAnswer = 1;
2529 
2530   if (Depth == 6)
2531     return 1;  // Limit search depth.
2532 
2533   switch (Op.getOpcode()) {
2534   default: break;
2535   case ISD::AssertSext:
2536     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2537     return VTBits-Tmp+1;
2538   case ISD::AssertZext:
2539     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2540     return VTBits-Tmp;
2541 
2542   case ISD::Constant: {
2543     const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2544     return Val.getNumSignBits();
2545   }
2546 
2547   case ISD::SIGN_EXTEND:
2548     Tmp =
2549         VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2550     return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2551 
2552   case ISD::SIGN_EXTEND_INREG:
2553     // Max of the input and what this extends.
2554     Tmp =
2555       cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2556     Tmp = VTBits-Tmp+1;
2557 
2558     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2559     return std::max(Tmp, Tmp2);
2560 
2561   case ISD::SRA:
2562     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2563     // SRA X, C   -> adds C sign bits.
2564     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2565       Tmp += C->getZExtValue();
2566       if (Tmp > VTBits) Tmp = VTBits;
2567     }
2568     return Tmp;
2569   case ISD::SHL:
2570     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2571       // shl destroys sign bits.
2572       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2573       if (C->getZExtValue() >= VTBits ||      // Bad shift.
2574           C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
2575       return Tmp - C->getZExtValue();
2576     }
2577     break;
2578   case ISD::AND:
2579   case ISD::OR:
2580   case ISD::XOR:    // NOT is handled here.
2581     // Logical binary ops preserve the number of sign bits at the worst.
2582     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2583     if (Tmp != 1) {
2584       Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2585       FirstAnswer = std::min(Tmp, Tmp2);
2586       // We computed what we know about the sign bits as our first
2587       // answer. Now proceed to the generic code that uses
2588       // computeKnownBits, and pick whichever answer is better.
2589     }
2590     break;
2591 
2592   case ISD::SELECT:
2593     Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2594     if (Tmp == 1) return 1;  // Early out.
2595     Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2596     return std::min(Tmp, Tmp2);
2597   case ISD::SELECT_CC:
2598     Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2599     if (Tmp == 1) return 1;  // Early out.
2600     Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1);
2601     return std::min(Tmp, Tmp2);
2602   case ISD::SMIN:
2603   case ISD::SMAX:
2604   case ISD::UMIN:
2605   case ISD::UMAX:
2606     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2607     if (Tmp == 1)
2608       return 1;  // Early out.
2609     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
2610     return std::min(Tmp, Tmp2);
2611   case ISD::SADDO:
2612   case ISD::UADDO:
2613   case ISD::SSUBO:
2614   case ISD::USUBO:
2615   case ISD::SMULO:
2616   case ISD::UMULO:
2617     if (Op.getResNo() != 1)
2618       break;
2619     // The boolean result conforms to getBooleanContents, just as for SETCC:
2620     // if it is 0/-1, all bits are sign bits.
2621     // We know that we have an integer-based boolean since these operations
2622     // are only available for integers.
2623     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2624         TargetLowering::ZeroOrNegativeOneBooleanContent)
2625       return VTBits;
2626     break;
2627   case ISD::SETCC:
2628     // If setcc returns 0/-1, all bits are sign bits.
2629     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2630         TargetLowering::ZeroOrNegativeOneBooleanContent)
2631       return VTBits;
2632     break;
2633   case ISD::ROTL:
2634   case ISD::ROTR:
2635     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2636       unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2637 
2638       // Handle rotate right by N like a rotate left by 32-N.
2639       if (Op.getOpcode() == ISD::ROTR)
2640         RotAmt = (VTBits-RotAmt) & (VTBits-1);
2641 
2642       // If we aren't rotating out all of the known-in sign bits, return the
2643       // number that are left.  This handles rotl(sext(x), 1) for example.
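           // e.g. (rotl (sext i8 X to i32), 1) has at least 25 - 1 = 24 sign bits,
           // since only one of the (at least 25) replicated sign bits is rotated out.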
2644       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2645       if (Tmp > RotAmt+1) return Tmp-RotAmt;
2646     }
2647     break;
2648   case ISD::ADD:
2649     // Add can have at most one carry bit.  Thus we know that the output
2650     // is, at worst, one more bit than the inputs.
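         // e.g. adding two i32 values that each have at least 8 sign bits yields a
         // result with at least min(8, 8) - 1 = 7 sign bits.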
2651     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2652     if (Tmp == 1) return 1;  // Early out.
2653 
2654     // Special case decrementing a value (ADD X, -1):
2655     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2656       if (CRHS->isAllOnesValue()) {
2657         APInt KnownZero, KnownOne;
2658         computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2659 
2660         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2661         // sign bits set.
2662         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2663           return VTBits;
2664 
2665         // If we are subtracting one from a positive number, there is no carry
2666         // out of the result.
2667         if (KnownZero.isNegative())
2668           return Tmp;
2669       }
2670 
2671     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2672     if (Tmp2 == 1) return 1;
2673     return std::min(Tmp, Tmp2)-1;
2674 
2675   case ISD::SUB:
2676     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2677     if (Tmp2 == 1) return 1;
2678 
2679     // Handle NEG.
2680     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2681       if (CLHS->isNullValue()) {
2682         APInt KnownZero, KnownOne;
2683         computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2684         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2685         // sign bits set.
2686         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2687           return VTBits;
2688 
2689         // If the input is known to be positive (the sign bit is known clear),
2690         // the output of the NEG has the same number of sign bits as the input.
2691         if (KnownZero.isNegative())
2692           return Tmp2;
2693 
2694         // Otherwise, we treat this like a SUB.
2695       }
2696 
2697     // Sub can have at most one carry bit.  Thus we know that the output
2698     // is, at worst, one more bit than the inputs.
2699     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2700     if (Tmp == 1) return 1;  // Early out.
2701     return std::min(Tmp, Tmp2)-1;
2702   case ISD::TRUNCATE:
2703     // FIXME: it's tricky to do anything useful for this, but it is an important
2704     // case for targets like X86.
2705     break;
2706   case ISD::EXTRACT_ELEMENT: {
2707     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2708     const int BitWidth = Op.getValueType().getSizeInBits();
2709     const int Items =
2710       Op.getOperand(0).getValueType().getSizeInBits() / BitWidth;
2711 
2712     // Get the reverse index: operand 1 indexes elements from the little end,
2713     // while the sign bits start at the big end.
2714     const int rIndex = Items - 1 -
2715       cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2716 
2717     // If the sign portion ends in our element, the subtraction gives the
2718     // correct result; otherwise the clamp below handles the out-of-range value.
2719     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
2720   }
2721   }
2722 
2723   // If we are looking at the loaded value of the SDNode.
2724   if (Op.getResNo() == 0) {
2725     // Handle LOADX separately here; the EXTLOAD case falls through below.
2726     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2727       unsigned ExtType = LD->getExtensionType();
2728       switch (ExtType) {
2729         default: break;
2730         case ISD::SEXTLOAD:    // '17' bits known
2731           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2732           return VTBits-Tmp+1;
2733         case ISD::ZEXTLOAD:    // '16' bits known
2734           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2735           return VTBits-Tmp;
2736       }
2737     }
2738   }
2739 
2740   // Allow the target to implement this method for its nodes.
2741   if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2742       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2743       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2744       Op.getOpcode() == ISD::INTRINSIC_VOID) {
2745     unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
2746     if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2747   }
2748 
2749   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2750   // use this information.
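       // e.g. if computeKnownBits proves that the top 12 bits are all zero (or all
       // one), then the value has at least 12 identical leading bits.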
2751   APInt KnownZero, KnownOne;
2752   computeKnownBits(Op, KnownZero, KnownOne, Depth);
2753 
2754   APInt Mask;
2755   if (KnownZero.isNegative()) {        // sign bit is 0
2756     Mask = KnownZero;
2757   } else if (KnownOne.isNegative()) {  // sign bit is 1;
2758     Mask = KnownOne;
2759   } else {
2760     // Nothing known.
2761     return FirstAnswer;
2762   }
2763 
2764   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2765   // the number of identical bits in the top of the input value.
2766   Mask = ~Mask;
2767   Mask <<= Mask.getBitWidth()-VTBits;
2768   // Return the number of leading zeros. We use 'min' here in case the mask
2769   // was zero before shifting; we never want to report more than VTBits bits.
2770   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2771 }
2772 
2773 /// isBaseWithConstantOffset - Return true if the specified operand is an
2774 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2775 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2776 /// semantics as an ADD.  This handles the equivalence:
2777 ///     X|Cst == X+Cst iff X&Cst = 0.
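     /// For example, if X is known to be a multiple of 8 (low three bits zero),
     /// then (or X, 4) computes exactly X + 4: the set bits cannot overlap, so
     /// no carry can occur.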
2778 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2779   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2780       !isa<ConstantSDNode>(Op.getOperand(1)))
2781     return false;
2782 
2783   if (Op.getOpcode() == ISD::OR &&
2784       !MaskedValueIsZero(Op.getOperand(0),
2785                      cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
2786     return false;
2787 
2788   return true;
2789 }
2790 
2791 
2792 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2793   // If we're told that NaNs won't happen, assume they won't.
2794   if (getTarget().Options.NoNaNsFPMath)
2795     return true;
2796 
2797   // If the value is a constant, we can obviously see if it is a NaN or not.
2798   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2799     return !C->getValueAPF().isNaN();
2800 
2801   // TODO: Recognize more cases here.
2802 
2803   return false;
2804 }
2805 
2806 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2807   // If the value is a constant, we can obviously see if it is a zero or not.
2808   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2809     return !C->isZero();
2810 
2811   // TODO: Recognize more cases here.
2812   switch (Op.getOpcode()) {
2813   default: break;
2814   case ISD::OR:
2815     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2816       return !C->isNullValue();
2817     break;
2818   }
2819 
2820   return false;
2821 }
2822 
2823 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2824   // Check the obvious case.
2825   if (A == B) return true;
2826 
2827   // Check for negative and positive zero.
2828   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2829     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2830       if (CA->isZero() && CB->isZero()) return true;
2831 
2832   // Otherwise they may not be equal.
2833   return false;
2834 }
2835 
2836 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
2837   assert(A.getValueType() == B.getValueType() &&
2838          "Values must have the same type");
2839   APInt AZero, AOne;
2840   APInt BZero, BOne;
2841   computeKnownBits(A, AZero, AOne);
2842   computeKnownBits(B, BZero, BOne);
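       // A and B have no common set bits iff every bit position is known to be
       // zero in at least one of them, i.e. the union of the known-zero masks is
       // all ones.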
2843   return (AZero | BZero).isAllOnesValue();
2844 }
2845 
2846 /// getNode - Gets or creates the specified node.
2847 ///
2848 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2849   FoldingSetNodeID ID;
2850   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
2851   void *IP = nullptr;
2852   if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
2853     return SDValue(E, 0);
2854 
2855   SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
2856                                          DL.getDebugLoc(), getVTList(VT));
2857   CSEMap.InsertNode(N, IP);
2858 
2859   InsertNode(N);
2860   return SDValue(N, 0);
2861 }
2862 
2863 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2864                               EVT VT, SDValue Operand) {
2865   // Constant fold unary operations with an integer constant operand. Even
2866   // opaque constant will be folded, because the folding of unary operations
2867   // doesn't create new constants with different values. Nevertheless, the
2868   // opaque flag is preserved during folding to prevent future folding with
2869   // other constants.
2870   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
2871     const APInt &Val = C->getAPIntValue();
2872     switch (Opcode) {
2873     default: break;
2874     case ISD::SIGN_EXTEND:
2875       return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
2876                          C->isTargetOpcode(), C->isOpaque());
2877     case ISD::ANY_EXTEND:
2878     case ISD::ZERO_EXTEND:
2879     case ISD::TRUNCATE:
2880       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
2881                          C->isTargetOpcode(), C->isOpaque());
2882     case ISD::UINT_TO_FP:
2883     case ISD::SINT_TO_FP: {
2884       APFloat apf(EVTToAPFloatSemantics(VT),
2885                   APInt::getNullValue(VT.getSizeInBits()));
2886       (void)apf.convertFromAPInt(Val,
2887                                  Opcode==ISD::SINT_TO_FP,
2888                                  APFloat::rmNearestTiesToEven);
2889       return getConstantFP(apf, DL, VT);
2890     }
2891     case ISD::BITCAST:
2892       if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
2893         return getConstantFP(APFloat(APFloat::IEEEhalf, Val), DL, VT);
2894       if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2895         return getConstantFP(APFloat(APFloat::IEEEsingle, Val), DL, VT);
2896       if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2897         return getConstantFP(APFloat(APFloat::IEEEdouble, Val), DL, VT);
2898       if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
2899         return getConstantFP(APFloat(APFloat::IEEEquad, Val), DL, VT);
2900       break;
2901     case ISD::BSWAP:
2902       return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
2903                          C->isOpaque());
2904     case ISD::CTPOP:
2905       return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
2906                          C->isOpaque());
2907     case ISD::CTLZ:
2908     case ISD::CTLZ_ZERO_UNDEF:
2909       return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
2910                          C->isOpaque());
2911     case ISD::CTTZ:
2912     case ISD::CTTZ_ZERO_UNDEF:
2913       return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
2914                          C->isOpaque());
2915     }
2916   }
2917 
2918   // Constant fold unary operations with a floating point constant operand.
2919   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
2920     APFloat V = C->getValueAPF();    // make copy
2921     switch (Opcode) {
2922     case ISD::FNEG:
2923       V.changeSign();
2924       return getConstantFP(V, DL, VT);
2925     case ISD::FABS:
2926       V.clearSign();
2927       return getConstantFP(V, DL, VT);
2928     case ISD::FCEIL: {
2929       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2930       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2931         return getConstantFP(V, DL, VT);
2932       break;
2933     }
2934     case ISD::FTRUNC: {
2935       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2936       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2937         return getConstantFP(V, DL, VT);
2938       break;
2939     }
2940     case ISD::FFLOOR: {
2941       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2942       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2943         return getConstantFP(V, DL, VT);
2944       break;
2945     }
2946     case ISD::FP_EXTEND: {
2947       bool ignored;
2948       // This can return overflow, underflow, or inexact; we don't care.
2949       // FIXME need to be more flexible about rounding mode.
2950       (void)V.convert(EVTToAPFloatSemantics(VT),
2951                       APFloat::rmNearestTiesToEven, &ignored);
2952       return getConstantFP(V, DL, VT);
2953     }
2954     case ISD::FP_TO_SINT:
2955     case ISD::FP_TO_UINT: {
2956       integerPart x[2];
2957       bool ignored;
2958       static_assert(integerPartWidth >= 64, "APFloat parts too small!");
2959       // FIXME need to be more flexible about rounding mode.
2960       APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2961                             Opcode==ISD::FP_TO_SINT,
2962                             APFloat::rmTowardZero, &ignored);
2963       if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
2964         break;
2965       APInt api(VT.getSizeInBits(), x);
2966       return getConstant(api, DL, VT);
2967     }
2968     case ISD::BITCAST:
2969       if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
2970         return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
2971       else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2972         return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
2973       else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2974         return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
2975       break;
2976     }
2977   }
2978 
2979   // Constant fold unary operations with a vector integer or float operand.
2980   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
2981     if (BV->isConstant()) {
2982       switch (Opcode) {
2983       default:
2984         // FIXME: Entirely reasonable to perform folding of other unary
2985         // operations here as the need arises.
2986         break;
2987       case ISD::FNEG:
2988       case ISD::FABS:
2989       case ISD::FCEIL:
2990       case ISD::FTRUNC:
2991       case ISD::FFLOOR:
2992       case ISD::FP_EXTEND:
2993       case ISD::FP_TO_SINT:
2994       case ISD::FP_TO_UINT:
2995       case ISD::TRUNCATE:
2996       case ISD::UINT_TO_FP:
2997       case ISD::SINT_TO_FP:
2998       case ISD::BSWAP:
2999       case ISD::CTLZ:
3000       case ISD::CTLZ_ZERO_UNDEF:
3001       case ISD::CTTZ:
3002       case ISD::CTTZ_ZERO_UNDEF:
3003       case ISD::CTPOP: {
3004         SDValue Ops = { Operand };
3005         if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
3006           return Fold;
3007       }
3008       }
3009     }
3010   }
3011 
3012   unsigned OpOpcode = Operand.getNode()->getOpcode();
3013   switch (Opcode) {
3014   case ISD::TokenFactor:
3015   case ISD::MERGE_VALUES:
3016   case ISD::CONCAT_VECTORS:
3017     return Operand;         // Factor, merge or concat of one node?  No need.
3018   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
3019   case ISD::FP_EXTEND:
3020     assert(VT.isFloatingPoint() &&
3021            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
3022     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
3023     assert((!VT.isVector() ||
3024             VT.getVectorNumElements() ==
3025             Operand.getValueType().getVectorNumElements()) &&
3026            "Vector element count mismatch!");
3027     assert(Operand.getValueType().bitsLT(VT) &&
3028            "Invalid fpext node, dst < src!");
3029     if (Operand.getOpcode() == ISD::UNDEF)
3030       return getUNDEF(VT);
3031     break;
3032   case ISD::SIGN_EXTEND:
3033     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3034            "Invalid SIGN_EXTEND!");
3035     if (Operand.getValueType() == VT) return Operand;   // noop extension
3036     assert((!VT.isVector() ||
3037             VT.getVectorNumElements() ==
3038             Operand.getValueType().getVectorNumElements()) &&
3039            "Vector element count mismatch!");
3040     assert(Operand.getValueType().bitsLT(VT) &&
3041            "Invalid sext node, dst < src!");
3042     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
3043       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3044     else if (OpOpcode == ISD::UNDEF)
3045       // sext(undef) = 0, because the top bits will all be the same.
3046       return getConstant(0, DL, VT);
3047     break;
3048   case ISD::ZERO_EXTEND:
3049     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3050            "Invalid ZERO_EXTEND!");
3051     if (Operand.getValueType() == VT) return Operand;   // noop extension
3052     assert((!VT.isVector() ||
3053             VT.getVectorNumElements() ==
3054             Operand.getValueType().getVectorNumElements()) &&
3055            "Vector element count mismatch!");
3056     assert(Operand.getValueType().bitsLT(VT) &&
3057            "Invalid zext node, dst < src!");
3058     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
3059       return getNode(ISD::ZERO_EXTEND, DL, VT,
3060                      Operand.getNode()->getOperand(0));
3061     else if (OpOpcode == ISD::UNDEF)
3062       // zext(undef) = 0, because the top bits will be zero.
3063       return getConstant(0, DL, VT);
3064     break;
3065   case ISD::ANY_EXTEND:
3066     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3067            "Invalid ANY_EXTEND!");
3068     if (Operand.getValueType() == VT) return Operand;   // noop extension
3069     assert((!VT.isVector() ||
3070             VT.getVectorNumElements() ==
3071             Operand.getValueType().getVectorNumElements()) &&
3072            "Vector element count mismatch!");
3073     assert(Operand.getValueType().bitsLT(VT) &&
3074            "Invalid anyext node, dst < src!");
3075 
3076     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3077         OpOpcode == ISD::ANY_EXTEND)
3078       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
3079       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3080     else if (OpOpcode == ISD::UNDEF)
3081       return getUNDEF(VT);
3082 
3083     // (ext (trunc x)) -> x
3084     if (OpOpcode == ISD::TRUNCATE) {
3085       SDValue OpOp = Operand.getNode()->getOperand(0);
3086       if (OpOp.getValueType() == VT)
3087         return OpOp;
3088     }
3089     break;
3090   case ISD::TRUNCATE:
3091     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3092            "Invalid TRUNCATE!");
3093     if (Operand.getValueType() == VT) return Operand;   // noop truncate
3094     assert((!VT.isVector() ||
3095             VT.getVectorNumElements() ==
3096             Operand.getValueType().getVectorNumElements()) &&
3097            "Vector element count mismatch!");
3098     assert(Operand.getValueType().bitsGT(VT) &&
3099            "Invalid truncate node, src < dst!");
3100     if (OpOpcode == ISD::TRUNCATE)
3101       return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3102     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3103         OpOpcode == ISD::ANY_EXTEND) {
3104       // If the source is smaller than the dest, we still need an extend.
3105       if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
3106             .bitsLT(VT.getScalarType()))
3107         return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3108       if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
3109         return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3110       return Operand.getNode()->getOperand(0);
3111     }
3112     if (OpOpcode == ISD::UNDEF)
3113       return getUNDEF(VT);
3114     break;
3115   case ISD::BSWAP:
3116     assert(VT.isInteger() && VT == Operand.getValueType() &&
3117            "Invalid BSWAP!");
3118     assert((VT.getScalarSizeInBits() % 16 == 0) &&
3119            "BSWAP types must be a multiple of 16 bits!");
3120     if (OpOpcode == ISD::UNDEF)
3121       return getUNDEF(VT);
3122     break;
3123   case ISD::BITCAST:
3124     // Basic sanity checking.
3125     assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
3126            && "Cannot BITCAST between types of different sizes!");
3127     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
3128     if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
3129       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
3130     if (OpOpcode == ISD::UNDEF)
3131       return getUNDEF(VT);
3132     break;
3133   case ISD::SCALAR_TO_VECTOR:
3134     assert(VT.isVector() && !Operand.getValueType().isVector() &&
3135            (VT.getVectorElementType() == Operand.getValueType() ||
3136             (VT.getVectorElementType().isInteger() &&
3137              Operand.getValueType().isInteger() &&
3138              VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
3139            "Illegal SCALAR_TO_VECTOR node!");
3140     if (OpOpcode == ISD::UNDEF)
3141       return getUNDEF(VT);
3142     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
3143     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
3144         isa<ConstantSDNode>(Operand.getOperand(1)) &&
3145         Operand.getConstantOperandVal(1) == 0 &&
3146         Operand.getOperand(0).getValueType() == VT)
3147       return Operand.getOperand(0);
3148     break;
3149   case ISD::FNEG:
3150     // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
3151     if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
3152       // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags?
3153       return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
3154                        Operand.getNode()->getOperand(0),
3155                        &cast<BinaryWithFlagsSDNode>(Operand.getNode())->Flags);
3156     if (OpOpcode == ISD::FNEG)  // --X -> X
3157       return Operand.getNode()->getOperand(0);
3158     break;
3159   case ISD::FABS:
3160     if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
3161       return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
3162     break;
3163   }
3164 
3165   SDNode *N;
3166   SDVTList VTs = getVTList(VT);
3167   if (VT != MVT::Glue) { // Don't CSE flag producing nodes
3168     FoldingSetNodeID ID;
3169     SDValue Ops[1] = { Operand };
3170     AddNodeIDNode(ID, Opcode, VTs, Ops);
3171     void *IP = nullptr;
3172     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
3173       return SDValue(E, 0);
3174 
3175     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
3176                                         DL.getDebugLoc(), VTs, Operand);
3177     CSEMap.InsertNode(N, IP);
3178   } else {
3179     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
3180                                         DL.getDebugLoc(), VTs, Operand);
3181   }
3182 
3183   InsertNode(N);
3184   return SDValue(N, 0);
3185 }
3186 
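     // FoldValue - Constant fold a binary integer operation on C1 and C2. The bool
     // in the returned pair is true if the opcode was handled; folds that would be
     // undefined, such as division or remainder by zero, report failure instead.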
3187 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
3188                                         const APInt &C2) {
3189   switch (Opcode) {
3190   case ISD::ADD:  return std::make_pair(C1 + C2, true);
3191   case ISD::SUB:  return std::make_pair(C1 - C2, true);
3192   case ISD::MUL:  return std::make_pair(C1 * C2, true);
3193   case ISD::AND:  return std::make_pair(C1 & C2, true);
3194   case ISD::OR:   return std::make_pair(C1 | C2, true);
3195   case ISD::XOR:  return std::make_pair(C1 ^ C2, true);
3196   case ISD::SHL:  return std::make_pair(C1 << C2, true);
3197   case ISD::SRL:  return std::make_pair(C1.lshr(C2), true);
3198   case ISD::SRA:  return std::make_pair(C1.ashr(C2), true);
3199   case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
3200   case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
3201   case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
3202   case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
3203   case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
3204   case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
3205   case ISD::UDIV:
3206     if (!C2.getBoolValue())
3207       break;
3208     return std::make_pair(C1.udiv(C2), true);
3209   case ISD::UREM:
3210     if (!C2.getBoolValue())
3211       break;
3212     return std::make_pair(C1.urem(C2), true);
3213   case ISD::SDIV:
3214     if (!C2.getBoolValue())
3215       break;
3216     return std::make_pair(C1.sdiv(C2), true);
3217   case ISD::SREM:
3218     if (!C2.getBoolValue())
3219       break;
3220     return std::make_pair(C1.srem(C2), true);
3221   }
3222   return std::make_pair(APInt(1, 0), false);
3223 }
3224 
3225 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
3226                                              const ConstantSDNode *Cst1,
3227                                              const ConstantSDNode *Cst2) {
3228   if (Cst1->isOpaque() || Cst2->isOpaque())
3229     return SDValue();
3230 
3231   std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
3232                                             Cst2->getAPIntValue());
3233   if (!Folded.second)
3234     return SDValue();
3235   return getConstant(Folded.first, DL, VT);
3236 }
3237 
3238 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
3239                                              SDNode *Cst1, SDNode *Cst2) {
3240   // If the opcode is a target-specific ISD node, there's nothing we can
3241   // do here and the operand rules may not line up with the below, so
3242   // bail early.
3243   if (Opcode >= ISD::BUILTIN_OP_END)
3244     return SDValue();
3245 
3246   // Handle the case of two scalars.
3247   if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
3248     if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
3249       if (SDValue Folded =
3250           FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2)) {
3251         if (!VT.isVector())
3252           return Folded;
3253         SmallVector<SDValue, 4> Outputs;
3254         // We may have a vector type but a scalar result. Create a splat.
3255         Outputs.resize(VT.getVectorNumElements(), Folded);
3256         // Build a big vector out of the scalar elements we generated.
3257         return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
3258       } else {
3259         return SDValue();
3260       }
3261     }
3262   }
3263 
3264   // For vectors extract each constant element into Inputs so we can constant
3265   // fold them individually.
3266   BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
3267   BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
3268   if (!BV1 || !BV2)
3269     return SDValue();
3270 
3271   assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3272 
3273   EVT SVT = VT.getScalarType();
3274   SmallVector<SDValue, 4> Outputs;
3275   for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3276     ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
3277     ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
3278     if (!V1 || !V2) // Not a constant, bail.
3279       return SDValue();
3280 
3281     if (V1->isOpaque() || V2->isOpaque())
3282       return SDValue();
3283 
3284     // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3285     // FIXME: This is valid and could be handled by truncating the APInts.
3286     if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3287       return SDValue();
3288 
3289     // Fold one vector element.
3290     std::pair<APInt, bool> Folded = FoldValue(Opcode, V1->getAPIntValue(),
3291                                               V2->getAPIntValue());
3292     if (!Folded.second)
3293       return SDValue();
3294     Outputs.push_back(getConstant(Folded.first, DL, SVT));
3295   }
3296 
3297   assert(VT.getVectorNumElements() == Outputs.size() &&
3298          "Vector size mismatch!");
3299 
3300   // We may have a vector type but a scalar result. Create a splat.
3301   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3302 
3303   // Build a big vector out of the scalar elements we generated.
3304   return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
3305 }
3306 
3307 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, SDLoc DL,
3308                                                    EVT VT,
3309                                                    ArrayRef<SDValue> Ops,
3310                                                    const SDNodeFlags *Flags) {
3311   // If the opcode is a target-specific ISD node, there's nothing we can
3312   // do here and the operand rules may not line up with the below, so
3313   // bail early.
3314   if (Opcode >= ISD::BUILTIN_OP_END)
3315     return SDValue();
3316 
3317   // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
3318   if (!VT.isVector())
3319     return SDValue();
3320 
3321   unsigned NumElts = VT.getVectorNumElements();
3322 
3323   auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
3324     return !Op.getValueType().isVector() ||
3325            Op.getValueType().getVectorNumElements() == NumElts;
3326   };
3327 
3328   auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
3329     BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
3330     return (Op.getOpcode() == ISD::UNDEF) ||
3331            (Op.getOpcode() == ISD::CONDCODE) || (BV && BV->isConstant());
3332   };
3333 
3334   // All operands must be vector types with the same number of elements as
3335   // the result type and must be either UNDEF or a build vector of constant
3336   // or UNDEF scalars.
3337   if (!std::all_of(Ops.begin(), Ops.end(), IsConstantBuildVectorOrUndef) ||
3338       !std::all_of(Ops.begin(), Ops.end(), IsScalarOrSameVectorSize))
3339     return SDValue();
3340 
3341   // If we are comparing vectors, then the result needs to be an i1 boolean
3342   // that is then sign-extended back to the legal result type.
3343   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
3344 
3345   // Find legal integer scalar type for constant promotion and
3346   // ensure that its scalar size is at least as large as source.
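       // For example, for a SETCC producing v16i8, each lane's compare is folded
       // as an i1 below and then sign-extended to i8 (assuming i8 is legal for
       // the target); the exact promoted type depends on getTypeToTransformTo.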
3347   EVT LegalSVT = VT.getScalarType();
3348   if (LegalSVT.isInteger()) {
3349     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3350     if (LegalSVT.bitsLT(SVT))
3351       return SDValue();
3352   }
3353 
3354   // Constant fold each scalar lane separately.
3355   SmallVector<SDValue, 4> ScalarResults;
3356   for (unsigned i = 0; i != NumElts; i++) {
3357     SmallVector<SDValue, 4> ScalarOps;
3358     for (SDValue Op : Ops) {
3359       EVT InSVT = Op.getValueType().getScalarType();
3360       BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
3361       if (!InBV) {
3362         // We've checked that this is UNDEF or a constant of some kind.
3363         if (Op.isUndef())
3364           ScalarOps.push_back(getUNDEF(InSVT));
3365         else
3366           ScalarOps.push_back(Op);
3367         continue;
3368       }
3369 
3370       SDValue ScalarOp = InBV->getOperand(i);
3371       EVT ScalarVT = ScalarOp.getValueType();
3372 
3373       // Build vector (integer) scalar operands may need implicit
3374       // truncation - do this before constant folding.
3375       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
3376         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
3377 
3378       ScalarOps.push_back(ScalarOp);
3379     }
3380 
3381     // Constant fold the scalar operands.
3382     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
3383 
3384     // Legalize the (integer) scalar constant if necessary.
3385     if (LegalSVT != SVT)
3386       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
3387 
3388     // Scalar folding only succeeded if the result is a constant or UNDEF.
3389     if (ScalarResult.getOpcode() != ISD::UNDEF &&
3390         ScalarResult.getOpcode() != ISD::Constant &&
3391         ScalarResult.getOpcode() != ISD::ConstantFP)
3392       return SDValue();
3393     ScalarResults.push_back(ScalarResult);
3394   }
3395 
3396   assert(ScalarResults.size() == NumElts &&
3397          "Unexpected number of scalar results for BUILD_VECTOR");
3398   return getNode(ISD::BUILD_VECTOR, DL, VT, ScalarResults);
3399 }
3400 
3401 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
3402                               SDValue N2, const SDNodeFlags *Flags) {
3403   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3404   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3405   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3406   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3407 
3408   // Canonicalize constant to RHS if commutative.
3409   if (isCommutativeBinOp(Opcode)) {
3410     if (N1C && !N2C) {
3411       std::swap(N1C, N2C);
3412       std::swap(N1, N2);
3413     } else if (N1CFP && !N2CFP) {
3414       std::swap(N1CFP, N2CFP);
3415       std::swap(N1, N2);
3416     }
3417   }
3418 
3419   switch (Opcode) {
3420   default: break;
3421   case ISD::TokenFactor:
3422     assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3423            N2.getValueType() == MVT::Other && "Invalid token factor!");
3424     // Fold trivial token factors.
3425     if (N1.getOpcode() == ISD::EntryToken) return N2;
3426     if (N2.getOpcode() == ISD::EntryToken) return N1;
3427     if (N1 == N2) return N1;
3428     break;
3429   case ISD::CONCAT_VECTORS:
3430     // Concat of UNDEFs is UNDEF.
3431     if (N1.getOpcode() == ISD::UNDEF &&
3432         N2.getOpcode() == ISD::UNDEF)
3433       return getUNDEF(VT);
3434 
3435     // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3436     // one big BUILD_VECTOR.
3437     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3438         N2.getOpcode() == ISD::BUILD_VECTOR) {
3439       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3440                                     N1.getNode()->op_end());
3441       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3442 
3443       // BUILD_VECTOR requires all inputs to be of the same type, find the
3444       // maximum type and extend them all.
3445       EVT SVT = VT.getScalarType();
3446       for (SDValue Op : Elts)
3447         SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3448       if (SVT.bitsGT(VT.getScalarType()))
3449         for (SDValue &Op : Elts)
3450           Op = TLI->isZExtFree(Op.getValueType(), SVT)
3451              ? getZExtOrTrunc(Op, DL, SVT)
3452              : getSExtOrTrunc(Op, DL, SVT);
3453 
3454       return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
3455     }
3456     break;
3457   case ISD::AND:
3458     assert(VT.isInteger() && "This operator does not apply to FP types!");
3459     assert(N1.getValueType() == N2.getValueType() &&
3460            N1.getValueType() == VT && "Binary operator types must match!");
3461     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
3462     // worth handling here.
3463     if (N2C && N2C->isNullValue())
3464       return N2;
3465     if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
3466       return N1;
3467     break;
3468   case ISD::OR:
3469   case ISD::XOR:
3470   case ISD::ADD:
3471   case ISD::SUB:
3472     assert(VT.isInteger() && "This operator does not apply to FP types!");
3473     assert(N1.getValueType() == N2.getValueType() &&
3474            N1.getValueType() == VT && "Binary operator types must match!");
3475     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
3476     // it's worth handling here.
3477     if (N2C && N2C->isNullValue())
3478       return N1;
3479     break;
3480   case ISD::UDIV:
3481   case ISD::UREM:
3482   case ISD::MULHU:
3483   case ISD::MULHS:
3484   case ISD::MUL:
3485   case ISD::SDIV:
3486   case ISD::SREM:
3487   case ISD::SMIN:
3488   case ISD::SMAX:
3489   case ISD::UMIN:
3490   case ISD::UMAX:
3491     assert(VT.isInteger() && "This operator does not apply to FP types!");
3492     assert(N1.getValueType() == N2.getValueType() &&
3493            N1.getValueType() == VT && "Binary operator types must match!");
3494     break;
3495   case ISD::FADD:
3496   case ISD::FSUB:
3497   case ISD::FMUL:
3498   case ISD::FDIV:
3499   case ISD::FREM:
3500     if (getTarget().Options.UnsafeFPMath) {
3501       if (Opcode == ISD::FADD) {
3502         // x+0 --> x
3503         if (N2CFP && N2CFP->getValueAPF().isZero())
3504           return N1;
3505       } else if (Opcode == ISD::FSUB) {
3506         // x-0 --> x
3507         if (N2CFP && N2CFP->getValueAPF().isZero())
3508           return N1;
3509       } else if (Opcode == ISD::FMUL) {
3510         // x*0 --> 0
3511         if (N2CFP && N2CFP->isZero())
3512           return N2;
3513         // x*1 --> x
3514         if (N2CFP && N2CFP->isExactlyValue(1.0))
3515           return N1;
3516       }
3517     }
3518     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3519     assert(N1.getValueType() == N2.getValueType() &&
3520            N1.getValueType() == VT && "Binary operator types must match!");
3521     break;
3522   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
3523     assert(N1.getValueType() == VT &&
3524            N1.getValueType().isFloatingPoint() &&
3525            N2.getValueType().isFloatingPoint() &&
3526            "Invalid FCOPYSIGN!");
3527     break;
3528   case ISD::SHL:
3529   case ISD::SRA:
3530   case ISD::SRL:
3531   case ISD::ROTL:
3532   case ISD::ROTR:
3533     assert(VT == N1.getValueType() &&
3534            "Shift operators' return type must be the same as their first arg");
3535     assert(VT.isInteger() && N2.getValueType().isInteger() &&
3536            "Shifts only work on integers");
3537     assert((!VT.isVector() || VT == N2.getValueType()) &&
3538            "Vector shift amounts must have the same type as their first arg");
3539     // Verify that the shift amount VT is big enough to hold valid shift
3540     // amounts.  This catches things like trying to shift an i1024 value by an
3541     // i8, which is easy to fall into in generic code that uses
3542     // TLI.getShiftAmount().
3543     assert(N2.getValueType().getSizeInBits() >=
3544                    Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
3545            "Invalid use of small shift amount with oversized value!");
3546 
3547     // Always fold shifts of i1 values so the code generator doesn't need to
3548     // handle them.  Since we know the size of the shift has to be less than the
3549     // size of the value, the shift/rotate count is guaranteed to be zero.
3550     if (VT == MVT::i1)
3551       return N1;
3552     if (N2C && N2C->isNullValue())
3553       return N1;
3554     break;
3555   case ISD::FP_ROUND_INREG: {
3556     EVT EVT = cast<VTSDNode>(N2)->getVT();
3557     assert(VT == N1.getValueType() && "Not an inreg round!");
3558     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3559            "Cannot FP_ROUND_INREG integer types");
3560     assert(EVT.isVector() == VT.isVector() &&
3561            "FP_ROUND_INREG type should be vector iff the operand "
3562            "type is vector!");
3563     assert((!EVT.isVector() ||
3564             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3565            "Vector element counts must match in FP_ROUND_INREG");
3566     assert(EVT.bitsLE(VT) && "Not rounding down!");
3567     (void)EVT;
3568     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
3569     break;
3570   }
3571   case ISD::FP_ROUND:
3572     assert(VT.isFloatingPoint() &&
3573            N1.getValueType().isFloatingPoint() &&
3574            VT.bitsLE(N1.getValueType()) &&
3575            N2C && "Invalid FP_ROUND!");
3576     if (N1.getValueType() == VT) return N1;  // noop conversion.
3577     break;
3578   case ISD::AssertSext:
3579   case ISD::AssertZext: {
3580     EVT EVT = cast<VTSDNode>(N2)->getVT();
3581     assert(VT == N1.getValueType() && "Not an inreg extend!");
3582     assert(VT.isInteger() && EVT.isInteger() &&
3583            "Cannot *_EXTEND_INREG FP types");
3584     assert(!EVT.isVector() &&
3585            "AssertSExt/AssertZExt type should be the vector element type "
3586            "rather than the vector type!");
3587     assert(EVT.bitsLE(VT) && "Not extending!");
3588     if (VT == EVT) return N1; // noop assertion.
3589     break;
3590   }
3591   case ISD::SIGN_EXTEND_INREG: {
3592     EVT EVT = cast<VTSDNode>(N2)->getVT();
3593     assert(VT == N1.getValueType() && "Not an inreg extend!");
3594     assert(VT.isInteger() && EVT.isInteger() &&
3595            "Cannot *_EXTEND_INREG FP types");
3596     assert(EVT.isVector() == VT.isVector() &&
3597            "SIGN_EXTEND_INREG type should be vector iff the operand "
3598            "type is vector!");
3599     assert((!EVT.isVector() ||
3600             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3601            "Vector element counts must match in SIGN_EXTEND_INREG");
3602     assert(EVT.bitsLE(VT) && "Not extending!");
3603     if (EVT == VT) return N1;  // Not actually extending
3604 
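         // Shifting left and then arithmetic-shifting right by the same amount
         // replicates bit (FromBits - 1) into the high bits; e.g. sign-extending
         // the low 8 bits of 0x00000080 within an i32 yields 0xFFFFFF80.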
3605     auto SignExtendInReg = [&](APInt Val) {
3606       unsigned FromBits = EVT.getScalarType().getSizeInBits();
3607       Val <<= Val.getBitWidth() - FromBits;
3608       Val = Val.ashr(Val.getBitWidth() - FromBits);
3609       return getConstant(Val, DL, VT.getScalarType());
3610     };
3611 
3612     if (N1C) {
3613       APInt Val = N1C->getAPIntValue();
3614       return SignExtendInReg(Val);
3615     }
3616     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
3617       SmallVector<SDValue, 8> Ops;
3618       for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
3619         SDValue Op = N1.getOperand(i);
3620         if (Op.getOpcode() == ISD::UNDEF) {
3621           Ops.push_back(getUNDEF(VT.getScalarType()));
3622           continue;
3623         }
3624         if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3625           APInt Val = C->getAPIntValue();
3626           Val = Val.zextOrTrunc(VT.getScalarSizeInBits());
3627           Ops.push_back(SignExtendInReg(Val));
3628           continue;
3629         }
3630         break;
3631       }
3632       if (Ops.size() == VT.getVectorNumElements())
3633         return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
3634     }
3635     break;
3636   }
3637   case ISD::EXTRACT_VECTOR_ELT:
3638     // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3639     if (N1.getOpcode() == ISD::UNDEF)
3640       return getUNDEF(VT);
3641 
3642     // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
3643     if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
3644       return getUNDEF(VT);
3645 
3646     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3647     // expanding copies of large vectors from registers.
3648     if (N2C &&
3649         N1.getOpcode() == ISD::CONCAT_VECTORS &&
3650         N1.getNumOperands() > 0) {
3651       unsigned Factor =
3652         N1.getOperand(0).getValueType().getVectorNumElements();
3653       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3654                      N1.getOperand(N2C->getZExtValue() / Factor),
3655                      getConstant(N2C->getZExtValue() % Factor, DL,
3656                                  N2.getValueType()));
3657     }
3658 
3659     // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3660     // expanding large vector constants.
3661     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3662       SDValue Elt = N1.getOperand(N2C->getZExtValue());
3663 
3664       if (VT != Elt.getValueType())
3665         // If the vector element type is not legal, the BUILD_VECTOR operands
3666         // are promoted and implicitly truncated, and the result implicitly
3667         // extended. Make that explicit here.
3668         Elt = getAnyExtOrTrunc(Elt, DL, VT);
3669 
3670       return Elt;
3671     }
3672 
3673     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3674     // operations are lowered to scalars.
3675     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3676       // If the indices are the same, return the inserted element else
3677       // if the indices are known different, extract the element from
3678       // the original vector.
3679       SDValue N1Op2 = N1.getOperand(2);
3680       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
3681 
3682       if (N1Op2C && N2C) {
3683         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3684           if (VT == N1.getOperand(1).getValueType())
3685             return N1.getOperand(1);
3686           else
3687             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3688         }
3689 
3690         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3691       }
3692     }
3693     break;
3694   case ISD::EXTRACT_ELEMENT:
3695     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3696     assert(!N1.getValueType().isVector() && !VT.isVector() &&
3697            (N1.getValueType().isInteger() == VT.isInteger()) &&
3698            N1.getValueType() != VT &&
3699            "Wrong types for EXTRACT_ELEMENT!");
3700 
3701     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3702     // 64-bit integers into 32-bit parts.  Instead of building the extract of
3703     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3704     if (N1.getOpcode() == ISD::BUILD_PAIR)
3705       return N1.getOperand(N2C->getZExtValue());
3706 
3707     // EXTRACT_ELEMENT of a constant int is also very common.
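         // e.g. extracting element 1 (the high half) of the i64 constant
         // 0x1122334455667788 as an i32 shifts right by 32 and truncates,
         // giving 0x11223344.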
3708     if (N1C) {
3709       unsigned ElementSize = VT.getSizeInBits();
3710       unsigned Shift = ElementSize * N2C->getZExtValue();
3711       APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
3712       return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
3713     }
3714     break;
3715   case ISD::EXTRACT_SUBVECTOR:
3716     if (VT.isSimple() && N1.getValueType().isSimple()) {
3717       assert(VT.isVector() && N1.getValueType().isVector() &&
3718              "Extract subvector VTs must be vectors!");
3719       assert(VT.getVectorElementType() ==
3720              N1.getValueType().getVectorElementType() &&
3721              "Extract subvector VTs must have the same element type!");
3722       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3723              "Extract subvector must be from larger vector to smaller vector!");
3724 
3725       if (N2C) {
3726         assert((VT.getVectorNumElements() + N2C->getZExtValue()
3727                 <= N1.getValueType().getVectorNumElements())
3728                && "Extract subvector overflow!");
3729       }
3730 
3731       // Trivial extraction.
3732       if (VT.getSimpleVT() == N1.getSimpleValueType())
3733         return N1;
3734     }
3735     break;
3736   }
3737 
3738   // Perform trivial constant folding.
3739   if (SDValue SV =
3740           FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
3741     return SV;
3742 
3743   // Constant fold FP operations.
3744   bool HasFPExceptions = TLI->hasFloatingPointExceptions();
3745   if (N1CFP) {
3746     if (N2CFP) {
3747       APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3748       APFloat::opStatus s;
3749       switch (Opcode) {
3750       case ISD::FADD:
3751         s = V1.add(V2, APFloat::rmNearestTiesToEven);
3752         if (!HasFPExceptions || s != APFloat::opInvalidOp)
3753           return getConstantFP(V1, DL, VT);
3754         break;
3755       case ISD::FSUB:
3756         s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3757         if (!HasFPExceptions || s!=APFloat::opInvalidOp)
3758           return getConstantFP(V1, DL, VT);
3759         break;
3760       case ISD::FMUL:
3761         s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3762         if (!HasFPExceptions || s!=APFloat::opInvalidOp)
3763           return getConstantFP(V1, DL, VT);
3764         break;
3765       case ISD::FDIV:
3766         s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3767         if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
3768                                  s!=APFloat::opDivByZero)) {
3769           return getConstantFP(V1, DL, VT);
3770         }
3771         break;
3772       case ISD::FREM :
3773         s = V1.mod(V2);
3774         if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
3775                                  s!=APFloat::opDivByZero)) {
3776           return getConstantFP(V1, DL, VT);
3777         }
3778         break;
3779       case ISD::FCOPYSIGN:
3780         V1.copySign(V2);
3781         return getConstantFP(V1, DL, VT);
3782       default: break;
3783       }
3784     }
3785 
3786     if (Opcode == ISD::FP_ROUND) {
3787       APFloat V = N1CFP->getValueAPF();    // make copy
3788       bool ignored;
3789       // This can return overflow, underflow, or inexact; we don't care.
3790       // FIXME need to be more flexible about rounding mode.
3791       (void)V.convert(EVTToAPFloatSemantics(VT),
3792                       APFloat::rmNearestTiesToEven, &ignored);
3793       return getConstantFP(V, DL, VT);
3794     }
3795   }
3796 
3797   // Canonicalize an UNDEF to the RHS, even over a constant.
3798   if (N1.getOpcode() == ISD::UNDEF) {
3799     if (isCommutativeBinOp(Opcode)) {
3800       std::swap(N1, N2);
3801     } else {
3802       switch (Opcode) {
3803       case ISD::FP_ROUND_INREG:
3804       case ISD::SIGN_EXTEND_INREG:
3805       case ISD::SUB:
3806       case ISD::FSUB:
3807       case ISD::FDIV:
3808       case ISD::FREM:
3809       case ISD::SRA:
3810         return N1;     // fold op(undef, arg2) -> undef
3811       case ISD::UDIV:
3812       case ISD::SDIV:
3813       case ISD::UREM:
3814       case ISD::SREM:
3815       case ISD::SRL:
3816       case ISD::SHL:
3817         if (!VT.isVector())
3818           return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
3819         // For vectors, we can't easily build an all zero vector, just return
3820         // the other (non-undef) operand.
3821         return N2;
3822       }
3823     }
3824   }
3825 
3826   // Fold a bunch of operators when the RHS is undef.
3827   if (N2.getOpcode() == ISD::UNDEF) {
3828     switch (Opcode) {
3829     case ISD::XOR:
3830       if (N1.getOpcode() == ISD::UNDEF)
3831         // Handle undef ^ undef -> 0 special case. This is a common
3832         // idiom (misuse).
3833         return getConstant(0, DL, VT);
3834       // fallthrough
3835     case ISD::ADD:
3836     case ISD::ADDC:
3837     case ISD::ADDE:
3838     case ISD::SUB:
3839     case ISD::UDIV:
3840     case ISD::SDIV:
3841     case ISD::UREM:
3842     case ISD::SREM:
3843       return N2;       // fold op(arg1, undef) -> undef
3844     case ISD::FADD:
3845     case ISD::FSUB:
3846     case ISD::FMUL:
3847     case ISD::FDIV:
3848     case ISD::FREM:
3849       if (getTarget().Options.UnsafeFPMath)
3850         return N2;
3851       break;
3852     case ISD::MUL:
3853     case ISD::AND:
3854     case ISD::SRL:
3855     case ISD::SHL:
3856       if (!VT.isVector())
3857         return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
3858       // For vectors, we can't easily build an all zero vector, just return
3859       // the LHS.
3860       return N1;
3861     case ISD::OR:
3862       if (!VT.isVector())
3863         return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
3864       // For vectors, we can't easily build an all one vector, just return
3865       // the LHS.
3866       return N1;
3867     case ISD::SRA:
3868       return N1;
3869     }
3870   }
3871 
3872   // Memoize this node if possible.
3873   BinarySDNode *N;
3874   SDVTList VTs = getVTList(VT);
3875   if (VT != MVT::Glue) {
3876     SDValue Ops[] = {N1, N2};
3877     FoldingSetNodeID ID;
3878     AddNodeIDNode(ID, Opcode, VTs, Ops);
3879     AddNodeIDFlags(ID, Opcode, Flags);
3880     void *IP = nullptr;
3881     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
3882       return SDValue(E, 0);
3883 
3884     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
3885 
3886     CSEMap.InsertNode(N, IP);
3887   } else {
3888     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
3889   }
3890 
3891   InsertNode(N);
3892   return SDValue(N, 0);
3893 }
3894 
3895 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3896                               SDValue N1, SDValue N2, SDValue N3) {
3897   // Perform various simplifications.
3898   switch (Opcode) {
3899   case ISD::FMA: {
3900     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3901     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3902     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3903     if (N1CFP && N2CFP && N3CFP) {
3904       APFloat  V1 = N1CFP->getValueAPF();
3905       const APFloat &V2 = N2CFP->getValueAPF();
3906       const APFloat &V3 = N3CFP->getValueAPF();
3907       APFloat::opStatus s =
3908         V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3909       if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
3910         return getConstantFP(V1, DL, VT);
3911     }
3912     break;
3913   }
3914   case ISD::CONCAT_VECTORS:
3915     // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3916     // one big BUILD_VECTOR.
3917     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3918         N2.getOpcode() == ISD::BUILD_VECTOR &&
3919         N3.getOpcode() == ISD::BUILD_VECTOR) {
3920       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3921                                     N1.getNode()->op_end());
3922       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3923       Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3924       return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
3925     }
3926     break;
3927   case ISD::SETCC: {
3928     // Use FoldSetCC to simplify SETCC nodes.
3929     if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
3930       return V;
3931     // Vector constant folding.
3932     SDValue Ops[] = {N1, N2, N3};
3933     if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
3934       return V;
3935     break;
3936   }
3937   case ISD::SELECT:
3938     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
3939      if (N1C->getZExtValue())
3940        return N2;             // select true, X, Y -> X
3941      return N3;             // select false, X, Y -> Y
3942     }
3943 
3944     if (N2 == N3) return N2;   // select C, X, X -> X
3945     break;
3946   case ISD::VECTOR_SHUFFLE:
3947     llvm_unreachable("should use getVectorShuffle constructor!");
3948   case ISD::INSERT_SUBVECTOR: {
3949     SDValue Index = N3;
3950     if (VT.isSimple() && N1.getValueType().isSimple()
3951         && N2.getValueType().isSimple()) {
3952       assert(VT.isVector() && N1.getValueType().isVector() &&
3953              N2.getValueType().isVector() &&
3954              "Insert subvector VTs must be vectors");
3955       assert(VT == N1.getValueType() &&
3956              "Dest and insert subvector source types must match!");
3957       assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3958              "Insert subvector must be from smaller vector to larger vector!");
3959       if (isa<ConstantSDNode>(Index)) {
3960         assert((N2.getValueType().getVectorNumElements() +
3961                 cast<ConstantSDNode>(Index)->getZExtValue()
3962                 <= VT.getVectorNumElements())
3963                && "Insert subvector overflow!");
3964       }
3965 
3966       // Trivial insertion.
3967       if (VT.getSimpleVT() == N2.getSimpleValueType())
3968         return N2;
3969     }
3970     break;
3971   }
3972   case ISD::BITCAST:
3973     // Fold bit_convert nodes from a type to themselves.
3974     if (N1.getValueType() == VT)
3975       return N1;
3976     break;
3977   }
3978 
3979   // Memoize node if it doesn't produce a flag.
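  // Nodes that produce glue are deliberately not CSE'd; a glue result cannot
  // be shared between multiple users.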
3980   SDNode *N;
3981   SDVTList VTs = getVTList(VT);
3982   if (VT != MVT::Glue) {
3983     SDValue Ops[] = { N1, N2, N3 };
3984     FoldingSetNodeID ID;
3985     AddNodeIDNode(ID, Opcode, VTs, Ops);
3986     void *IP = nullptr;
3987     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
3988       return SDValue(E, 0);
3989 
3990     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3991                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3992     CSEMap.InsertNode(N, IP);
3993   } else {
3994     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3995                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3996   }
3997 
3998   InsertNode(N);
3999   return SDValue(N, 0);
4000 }
4001 
4002 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4003                               SDValue N1, SDValue N2, SDValue N3,
4004                               SDValue N4) {
4005   SDValue Ops[] = { N1, N2, N3, N4 };
4006   return getNode(Opcode, DL, VT, Ops);
4007 }
4008 
4009 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4010                               SDValue N1, SDValue N2, SDValue N3,
4011                               SDValue N4, SDValue N5) {
4012   SDValue Ops[] = { N1, N2, N3, N4, N5 };
4013   return getNode(Opcode, DL, VT, Ops);
4014 }
4015 
4016 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
4017 /// the incoming stack arguments to be loaded from the stack.
4018 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
4019   SmallVector<SDValue, 8> ArgChains;
4020 
4021   // Include the original chain at the beginning of the list. When this is
4022   // used by target LowerCall hooks, this helps legalize find the
4023   // CALLSEQ_BEGIN node.
4024   ArgChains.push_back(Chain);
4025 
4026   // Add a chain value for each stack argument.
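  // Incoming (fixed) stack objects have negative frame indices, which is what
  // the FI->getIndex() < 0 check below matches.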
4027   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
4028        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
4029     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
4030       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
4031         if (FI->getIndex() < 0)
4032           ArgChains.push_back(SDValue(L, 1));
4033 
4034   // Build a tokenfactor for all the chains.
4035   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
4036 }
4037 
4038 /// getMemsetValue - Build a splatted (and possibly vectorized) representation
4039 /// of the memset value operand.
4040 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
4041                               SDLoc dl) {
4042   assert(Value.getOpcode() != ISD::UNDEF);
4043 
4044   unsigned NumBits = VT.getScalarType().getSizeInBits();
4045   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
4046     assert(C->getAPIntValue().getBitWidth() == 8);
4047     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
4048     if (VT.isInteger())
4049       return DAG.getConstant(Val, dl, VT);
4050     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
4051                              VT);
4052   }
4053 
4054   assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
4055   EVT IntVT = VT.getScalarType();
4056   if (!IntVT.isInteger())
4057     IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
4058 
4059   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
4060   if (NumBits > 8) {
4061     // Use a multiplication with 0x010101... to extend the input to the
4062     // required length.
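    // For example, an i8 fill value of 0xAB extended to i32 becomes
    // 0xAB * 0x01010101 = 0xABABABAB.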
4063     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
4064     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
4065                         DAG.getConstant(Magic, dl, IntVT));
4066   }
4067 
4068   if (VT != Value.getValueType() && !VT.isInteger())
4069     Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
4070   if (VT != Value.getValueType()) {
4071     assert(VT.getVectorElementType() == Value.getValueType() &&
4072            "value type should be one vector element here");
4073     SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
4074     Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
4075   }
4076 
4077   return Value;
4078 }
4079 
4080 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
4081 /// used when a memcpy is turned into a memset because the source is a
4082 /// constant string pointer.
4083 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
4084                                   const TargetLowering &TLI, StringRef Str) {
4085   // Handle vector with all elements zero.
4086   if (Str.empty()) {
4087     if (VT.isInteger())
4088       return DAG.getConstant(0, dl, VT);
4089     else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
4090       return DAG.getConstantFP(0.0, dl, VT);
4091     else if (VT.isVector()) {
4092       unsigned NumElts = VT.getVectorNumElements();
4093       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
4094       return DAG.getNode(ISD::BITCAST, dl, VT,
4095                          DAG.getConstant(0, dl,
4096                                          EVT::getVectorVT(*DAG.getContext(),
4097                                                           EltVT, NumElts)));
4098     } else
4099       llvm_unreachable("Expected type!");
4100   }
4101 
4102   assert(!VT.isVector() && "Can't handle vector type here!");
4103   unsigned NumVTBits = VT.getSizeInBits();
4104   unsigned NumVTBytes = NumVTBits / 8;
4105   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
4106 
4107   APInt Val(NumVTBits, 0);
4108   if (DAG.getDataLayout().isLittleEndian()) {
4109     for (unsigned i = 0; i != NumBytes; ++i)
4110       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
4111   } else {
4112     for (unsigned i = 0; i != NumBytes; ++i)
4113       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
4114   }
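  // For example, on a little-endian target the first four bytes of "abcd"
  // pack into the i32 immediate 0x64636261.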
4115 
4116   // If the "cost" of materializing the integer immediate is less than the cost
4117   // of a load, then it is cost effective to turn the load into the immediate.
4118   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
4119   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
4120     return DAG.getConstant(Val, dl, VT);
4121   return SDValue(nullptr, 0);
4122 }
4123 
4124 /// getMemBasePlusOffset - Returns a node computing the given base pointer
4125 /// plus a constant offset.
4126 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
4127                                       SelectionDAG &DAG) {
4128   EVT VT = Base.getValueType();
4129   return DAG.getNode(ISD::ADD, dl,
4130                      VT, Base, DAG.getConstant(Offset, dl, VT));
4131 }
4132 
4133 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
4134 ///
4135 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
4136   unsigned SrcDelta = 0;
4137   GlobalAddressSDNode *G = nullptr;
4138   if (Src.getOpcode() == ISD::GlobalAddress)
4139     G = cast<GlobalAddressSDNode>(Src);
4140   else if (Src.getOpcode() == ISD::ADD &&
4141            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
4142            Src.getOperand(1).getOpcode() == ISD::Constant) {
4143     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
4144     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
4145   }
4146   if (!G)
4147     return false;
4148 
4149   return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
4150 }
4151 
4152 /// Determines the optimal series of memory ops to replace the memset / memcpy.
4153 /// Return true if the number of memory ops is below the threshold (Limit).
4154 /// The sequence of value types for the memory ops is returned by reference
4155 /// in MemOps.
4156 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
4157                                      unsigned Limit, uint64_t Size,
4158                                      unsigned DstAlign, unsigned SrcAlign,
4159                                      bool IsMemset,
4160                                      bool ZeroMemset,
4161                                      bool MemcpyStrSrc,
4162                                      bool AllowOverlap,
4163                                      SelectionDAG &DAG,
4164                                      const TargetLowering &TLI) {
4165   assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
4166          "Expecting memcpy / memset source to meet alignment requirement!");
4167   // If 'SrcAlign' is zero, that means the memory operation does not need to
4168   // load the value, i.e. memset or memcpy from constant string. Otherwise,
4169   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
4170   // is the specified alignment of the memory operation. If it is zero, that
4171   // means it's possible to change the alignment of the destination.
4172   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
4173   // not need to be loaded.
4174   EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
4175                                    IsMemset, ZeroMemset, MemcpyStrSrc,
4176                                    DAG.getMachineFunction());
4177 
4178   if (VT == MVT::Other) {
4179     unsigned AS = 0;
4180     if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) ||
4181         TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
4182       VT = TLI.getPointerTy(DAG.getDataLayout());
4183     } else {
4184       switch (DstAlign & 7) {
4185       case 0:  VT = MVT::i64; break;
4186       case 4:  VT = MVT::i32; break;
4187       case 2:  VT = MVT::i16; break;
4188       default: VT = MVT::i8;  break;
4189       }
4190     }
4191 
4192     MVT LVT = MVT::i64;
4193     while (!TLI.isTypeLegal(LVT))
4194       LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
4195     assert(LVT.isInteger());
4196 
4197     if (VT.bitsGT(LVT))
4198       VT = LVT;
4199   }
4200 
4201   unsigned NumMemOps = 0;
4202   while (Size != 0) {
4203     unsigned VTSize = VT.getSizeInBits() / 8;
4204     while (VTSize > Size) {
4205       // For now, only use non-vector loads / stores for the left-over pieces.
4206       EVT NewVT = VT;
4207       unsigned NewVTSize;
4208 
4209       bool Found = false;
4210       if (VT.isVector() || VT.isFloatingPoint()) {
4211         NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
4212         if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
4213             TLI.isSafeMemOpType(NewVT.getSimpleVT()))
4214           Found = true;
4215         else if (NewVT == MVT::i64 &&
4216                  TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
4217                  TLI.isSafeMemOpType(MVT::f64)) {
4218           // i64 is usually not legal on 32-bit targets, but f64 may be.
4219           NewVT = MVT::f64;
4220           Found = true;
4221         }
4222       }
4223 
4224       if (!Found) {
4225         do {
4226           NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
4227           if (NewVT == MVT::i8)
4228             break;
4229         } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
4230       }
4231       NewVTSize = NewVT.getSizeInBits() / 8;
4232 
4233       // If the new VT cannot cover all of the remaining bits, then consider
4234       // issuing a (or a pair of) unaligned and overlapping load / store.
4235       // FIXME: Only does this for 64-bit or more since we don't have proper
4236       // cost model for unaligned load / store.
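      // For example, a 15-byte copy on a target with fast unaligned i64
      // accesses becomes two overlapping i64 operations at offsets 0 and 7.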
4237       bool Fast;
4238       unsigned AS = 0;
4239       if (NumMemOps && AllowOverlap &&
4240           VTSize >= 8 && NewVTSize < Size &&
4241           TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
4242         VTSize = Size;
4243       else {
4244         VT = NewVT;
4245         VTSize = NewVTSize;
4246       }
4247     }
4248 
4249     if (++NumMemOps > Limit)
4250       return false;
4251 
4252     MemOps.push_back(VT);
4253     Size -= VTSize;
4254   }
4255 
4256   return true;
4257 }
4258 
4259 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
4260   // On Darwin, -Os means optimize for size without hurting performance, so
4261   // only really optimize for size when -Oz (MinSize) is used.
4262   if (MF.getTarget().getTargetTriple().isOSDarwin())
4263     return MF.getFunction()->optForMinSize();
4264   return MF.getFunction()->optForSize();
4265 }
4266 
4267 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
4268                                        SDValue Chain, SDValue Dst,
4269                                        SDValue Src, uint64_t Size,
4270                                        unsigned Align, bool isVol,
4271                                        bool AlwaysInline,
4272                                        MachinePointerInfo DstPtrInfo,
4273                                        MachinePointerInfo SrcPtrInfo) {
4274   // Turn a memcpy of undef into a nop.
4275   if (Src.getOpcode() == ISD::UNDEF)
4276     return Chain;
4277 
4278   // Expand memcpy to a series of load and store ops if the size operand falls
4279   // below a certain threshold.
4280   // TODO: In the AlwaysInline case, if the size is big then generate a loop
4281   // rather than maybe a humongous number of loads and stores.
4282   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4283   std::vector<EVT> MemOps;
4284   bool DstAlignCanChange = false;
4285   MachineFunction &MF = DAG.getMachineFunction();
4286   MachineFrameInfo *MFI = MF.getFrameInfo();
4287   bool OptSize = shouldLowerMemFuncForSize(MF);
4288   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4289   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4290     DstAlignCanChange = true;
4291   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4292   if (Align > SrcAlign)
4293     SrcAlign = Align;
4294   StringRef Str;
4295   bool CopyFromStr = isMemSrcFromString(Src, Str);
4296   bool isZeroStr = CopyFromStr && Str.empty();
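  // An all-zero constant string needs no loads from the source at all, so its
  // source alignment is treated as unconstrained (0) below.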
4297   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
4298 
4299   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4300                                 (DstAlignCanChange ? 0 : Align),
4301                                 (isZeroStr ? 0 : SrcAlign),
4302                                 false, false, CopyFromStr, true, DAG, TLI))
4303     return SDValue();
4304 
4305   if (DstAlignCanChange) {
4306     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4307     unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4308 
4309     // Don't promote to an alignment that would require dynamic stack
4310     // realignment.
4311     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
4312     if (!TRI->needsStackRealignment(MF))
4313       while (NewAlign > Align &&
4314              DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
4315           NewAlign /= 2;
4316 
4317     if (NewAlign > Align) {
4318       // Give the stack frame object a larger alignment if needed.
4319       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4320         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4321       Align = NewAlign;
4322     }
4323   }
4324 
4325   SmallVector<SDValue, 8> OutChains;
4326   unsigned NumMemOps = MemOps.size();
4327   uint64_t SrcOff = 0, DstOff = 0;
4328   for (unsigned i = 0; i != NumMemOps; ++i) {
4329     EVT VT = MemOps[i];
4330     unsigned VTSize = VT.getSizeInBits() / 8;
4331     SDValue Value, Store;
4332 
4333     if (VTSize > Size) {
4334       // Issuing an unaligned load / store pair  that overlaps with the previous
4335       // pair. Adjust the offset accordingly.
4336       assert(i == NumMemOps-1 && i != 0);
4337       SrcOff -= VTSize - Size;
4338       DstOff -= VTSize - Size;
4339     }
4340 
4341     if (CopyFromStr &&
4342         (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
4343       // It's unlikely a store of a vector immediate can be done in a single
4344       // instruction. It would require a load from a constantpool first.
4345       // We only handle zero vectors here.
4346       // FIXME: Handle other cases where store of vector immediate is done in
4347       // a single instruction.
4348       Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
4349       if (Value.getNode())
4350         Store = DAG.getStore(Chain, dl, Value,
4351                              getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4352                              DstPtrInfo.getWithOffset(DstOff), isVol,
4353                              false, Align);
4354     }
4355 
4356     if (!Store.getNode()) {
4357       // The type might not be legal for the target.  This should only happen
4358       // if the type is smaller than a legal type, as on PPC, so the right
4359       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
4360       // to Load/Store if NVT==VT.
4361       // FIXME does the case above also need this?
4362       EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
4363       assert(NVT.bitsGE(VT));
4364       Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
4365                              getMemBasePlusOffset(Src, SrcOff, dl, DAG),
4366                              SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
4367                              false, MinAlign(SrcAlign, SrcOff));
4368       Store = DAG.getTruncStore(Chain, dl, Value,
4369                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4370                                 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
4371                                 false, Align);
4372     }
4373     OutChains.push_back(Store);
4374     SrcOff += VTSize;
4375     DstOff += VTSize;
4376     Size -= VTSize;
4377   }
4378 
4379   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4380 }
4381 
4382 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
4383                                         SDValue Chain, SDValue Dst,
4384                                         SDValue Src, uint64_t Size,
4385                                         unsigned Align,  bool isVol,
4386                                         bool AlwaysInline,
4387                                         MachinePointerInfo DstPtrInfo,
4388                                         MachinePointerInfo SrcPtrInfo) {
4389   // Turn a memmove of undef into a nop.
4390   if (Src.getOpcode() == ISD::UNDEF)
4391     return Chain;
4392 
4393   // Expand memmove to a series of load and store ops if the size operand falls
4394   // below a certain threshold.
4395   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4396   std::vector<EVT> MemOps;
4397   bool DstAlignCanChange = false;
4398   MachineFunction &MF = DAG.getMachineFunction();
4399   MachineFrameInfo *MFI = MF.getFrameInfo();
4400   bool OptSize = shouldLowerMemFuncForSize(MF);
4401   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4402   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4403     DstAlignCanChange = true;
4404   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4405   if (Align > SrcAlign)
4406     SrcAlign = Align;
4407   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
4408 
4409   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4410                                 (DstAlignCanChange ? 0 : Align), SrcAlign,
4411                                 false, false, false, false, DAG, TLI))
4412     return SDValue();
4413 
4414   if (DstAlignCanChange) {
4415     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4416     unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4417     if (NewAlign > Align) {
4418       // Give the stack frame object a larger alignment if needed.
4419       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4420         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4421       Align = NewAlign;
4422     }
4423   }
4424 
4425   uint64_t SrcOff = 0, DstOff = 0;
4426   SmallVector<SDValue, 8> LoadValues;
4427   SmallVector<SDValue, 8> LoadChains;
4428   SmallVector<SDValue, 8> OutChains;
4429   unsigned NumMemOps = MemOps.size();
4430   for (unsigned i = 0; i < NumMemOps; i++) {
4431     EVT VT = MemOps[i];
4432     unsigned VTSize = VT.getSizeInBits() / 8;
4433     SDValue Value;
4434 
4435     Value = DAG.getLoad(VT, dl, Chain,
4436                         getMemBasePlusOffset(Src, SrcOff, dl, DAG),
4437                         SrcPtrInfo.getWithOffset(SrcOff), isVol,
4438                         false, false, SrcAlign);
4439     LoadValues.push_back(Value);
4440     LoadChains.push_back(Value.getValue(1));
4441     SrcOff += VTSize;
4442   }
4443   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
4444   OutChains.clear();
4445   for (unsigned i = 0; i < NumMemOps; i++) {
4446     EVT VT = MemOps[i];
4447     unsigned VTSize = VT.getSizeInBits() / 8;
4448     SDValue Store;
4449 
4450     Store = DAG.getStore(Chain, dl, LoadValues[i],
4451                          getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4452                          DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
4453     OutChains.push_back(Store);
4454     DstOff += VTSize;
4455   }
4456 
4457   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4458 }
4459 
4460 /// \brief Lower the call to 'memset' intrinsic function into a series of store
4461 /// operations.
4462 ///
4463 /// \param DAG Selection DAG where lowered code is placed.
4464 /// \param dl Link to corresponding IR location.
4465 /// \param Chain Control flow dependency.
4466 /// \param Dst Pointer to destination memory location.
4467 /// \param Src Value of byte to write into the memory.
4468 /// \param Size Number of bytes to write.
4469 /// \param Align Alignment of the destination in bytes.
4470 /// \param isVol True if destination is volatile.
4471 /// \param DstPtrInfo IR information on the memory pointer.
4472 /// \returns New head in the control flow if lowering was successful;
4473 /// otherwise an empty SDValue.
4474 ///
4475 /// The function tries to replace the 'llvm.memset' intrinsic with several
4476 /// store operations and value calculation code. This is usually profitable
4477 /// for small memory sizes.
4478 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
4479                                SDValue Chain, SDValue Dst,
4480                                SDValue Src, uint64_t Size,
4481                                unsigned Align, bool isVol,
4482                                MachinePointerInfo DstPtrInfo) {
4483   // Turn a memset of undef into a nop.
4484   if (Src.getOpcode() == ISD::UNDEF)
4485     return Chain;
4486 
4487   // Expand memset to a series of load/store ops if the size operand
4488   // falls below a certain threshold.
4489   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4490   std::vector<EVT> MemOps;
4491   bool DstAlignCanChange = false;
4492   MachineFunction &MF = DAG.getMachineFunction();
4493   MachineFrameInfo *MFI = MF.getFrameInfo();
4494   bool OptSize = shouldLowerMemFuncForSize(MF);
4495   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4496   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4497     DstAlignCanChange = true;
4498   bool IsZeroVal =
4499     isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4500   if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4501                                 Size, (DstAlignCanChange ? 0 : Align), 0,
4502                                 true, IsZeroVal, false, true, DAG, TLI))
4503     return SDValue();
4504 
4505   if (DstAlignCanChange) {
4506     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4507     unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4508     if (NewAlign > Align) {
4509       // Give the stack frame object a larger alignment if needed.
4510       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4511         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4512       Align = NewAlign;
4513     }
4514   }
4515 
4516   SmallVector<SDValue, 8> OutChains;
4517   uint64_t DstOff = 0;
4518   unsigned NumMemOps = MemOps.size();
4519 
4520   // Find the largest store and generate the bit pattern for it.
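  // Smaller stores reuse this pattern via truncation when the target can do
  // the truncate for free; otherwise the value is recomputed per type below.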
4521   EVT LargestVT = MemOps[0];
4522   for (unsigned i = 1; i < NumMemOps; i++)
4523     if (MemOps[i].bitsGT(LargestVT))
4524       LargestVT = MemOps[i];
4525   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4526 
4527   for (unsigned i = 0; i < NumMemOps; i++) {
4528     EVT VT = MemOps[i];
4529     unsigned VTSize = VT.getSizeInBits() / 8;
4530     if (VTSize > Size) {
4531       // Issuing an unaligned load / store pair  that overlaps with the previous
4532       // pair. Adjust the offset accordingly.
4533       assert(i == NumMemOps-1 && i != 0);
4534       DstOff -= VTSize - Size;
4535     }
4536 
4537     // If this store is smaller than the largest store see whether we can get
4538     // the smaller value for free with a truncate.
4539     SDValue Value = MemSetValue;
4540     if (VT.bitsLT(LargestVT)) {
4541       if (!LargestVT.isVector() && !VT.isVector() &&
4542           TLI.isTruncateFree(LargestVT, VT))
4543         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4544       else
4545         Value = getMemsetValue(Src, VT, DAG, dl);
4546     }
4547     assert(Value.getValueType() == VT && "Value with wrong type.");
4548     SDValue Store = DAG.getStore(Chain, dl, Value,
4549                                  getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4550                                  DstPtrInfo.getWithOffset(DstOff),
4551                                  isVol, false, Align);
4552     OutChains.push_back(Store);
4553     DstOff += VT.getSizeInBits() / 8;
4554     Size -= VTSize;
4555   }
4556 
4557   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4558 }
4559 
4560 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
4561                                             unsigned AS) {
4562   // Lowering memcpy / memset / memmove intrinsics to calls is only valid if
4563   // all pointer operands can be losslessly bitcast to address space 0.
4564   if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
4565     report_fatal_error("cannot lower memory intrinsic in address space " +
4566                        Twine(AS));
4567   }
4568 }
4569 
4570 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
4571                                 SDValue Src, SDValue Size,
4572                                 unsigned Align, bool isVol, bool AlwaysInline,
4573                                 bool isTailCall, MachinePointerInfo DstPtrInfo,
4574                                 MachinePointerInfo SrcPtrInfo) {
4575   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4576 
4577   // Check to see if we should lower the memcpy to loads and stores first.
4578   // For cases within the target-specified limits, this is the best choice.
4579   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4580   if (ConstantSize) {
4581     // Memcpy with size zero? Just return the original chain.
4582     if (ConstantSize->isNullValue())
4583       return Chain;
4584 
4585     SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4586                                              ConstantSize->getZExtValue(),Align,
4587                                 isVol, false, DstPtrInfo, SrcPtrInfo);
4588     if (Result.getNode())
4589       return Result;
4590   }
4591 
4592   // Then check to see if we should lower the memcpy with target-specific
4593   // code. If the target chooses to do this, this is the next best.
4594   if (TSI) {
4595     SDValue Result = TSI->EmitTargetCodeForMemcpy(
4596         *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
4597         DstPtrInfo, SrcPtrInfo);
4598     if (Result.getNode())
4599       return Result;
4600   }
4601 
4602   // If we really need inline code and the target declined to provide it,
4603   // use a (potentially long) sequence of loads and stores.
4604   if (AlwaysInline) {
4605     assert(ConstantSize && "AlwaysInline requires a constant size!");
4606     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4607                                    ConstantSize->getZExtValue(), Align, isVol,
4608                                    true, DstPtrInfo, SrcPtrInfo);
4609   }
4610 
4611   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4612   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4613 
4614   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4615   // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4616   // respect volatile, so they may do things like read or write memory
4617   // beyond the given memory regions. But fixing this isn't easy, and most
4618   // people don't care.
4619 
4620   // Emit a library call.
4621   TargetLowering::ArgListTy Args;
4622   TargetLowering::ArgListEntry Entry;
4623   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
4624   Entry.Node = Dst; Args.push_back(Entry);
4625   Entry.Node = Src; Args.push_back(Entry);
4626   Entry.Node = Size; Args.push_back(Entry);
4627   // FIXME: pass in SDLoc
4628   TargetLowering::CallLoweringInfo CLI(*this);
4629   CLI.setDebugLoc(dl)
4630       .setChain(Chain)
4631       .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4632                  Type::getVoidTy(*getContext()),
4633                  getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4634                                    TLI->getPointerTy(getDataLayout())),
4635                  std::move(Args), 0)
4636       .setDiscardResult()
4637       .setTailCall(isTailCall);
4638 
4639   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4640   return CallResult.second;
4641 }
4642 
4643 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4644                                  SDValue Src, SDValue Size,
4645                                  unsigned Align, bool isVol, bool isTailCall,
4646                                  MachinePointerInfo DstPtrInfo,
4647                                  MachinePointerInfo SrcPtrInfo) {
4648   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4649 
4650   // Check to see if we should lower the memmove to loads and stores first.
4651   // For cases within the target-specified limits, this is the best choice.
4652   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4653   if (ConstantSize) {
4654     // Memmove with size zero? Just return the original chain.
4655     if (ConstantSize->isNullValue())
4656       return Chain;
4657 
4658     SDValue Result =
4659       getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4660                                ConstantSize->getZExtValue(), Align, isVol,
4661                                false, DstPtrInfo, SrcPtrInfo);
4662     if (Result.getNode())
4663       return Result;
4664   }
4665 
4666   // Then check to see if we should lower the memmove with target-specific
4667   // code. If the target chooses to do this, this is the next best.
4668   if (TSI) {
4669     SDValue Result = TSI->EmitTargetCodeForMemmove(
4670         *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
4671     if (Result.getNode())
4672       return Result;
4673   }
4674 
4675   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4676   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4677 
4678   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4679   // not be safe.  See memcpy above for more details.
4680 
4681   // Emit a library call.
4682   TargetLowering::ArgListTy Args;
4683   TargetLowering::ArgListEntry Entry;
4684   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
4685   Entry.Node = Dst; Args.push_back(Entry);
4686   Entry.Node = Src; Args.push_back(Entry);
4687   Entry.Node = Size; Args.push_back(Entry);
4688   // FIXME:  pass in SDLoc
4689   TargetLowering::CallLoweringInfo CLI(*this);
4690   CLI.setDebugLoc(dl)
4691       .setChain(Chain)
4692       .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4693                  Type::getVoidTy(*getContext()),
4694                  getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4695                                    TLI->getPointerTy(getDataLayout())),
4696                  std::move(Args), 0)
4697       .setDiscardResult()
4698       .setTailCall(isTailCall);
4699 
4700   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4701   return CallResult.second;
4702 }
4703 
4704 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4705                                 SDValue Src, SDValue Size,
4706                                 unsigned Align, bool isVol, bool isTailCall,
4707                                 MachinePointerInfo DstPtrInfo) {
4708   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4709 
4710   // Check to see if we should lower the memset to stores first.
4711   // For cases within the target-specified limits, this is the best choice.
4712   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4713   if (ConstantSize) {
4714     // Memset with size zero? Just return the original chain.
4715     if (ConstantSize->isNullValue())
4716       return Chain;
4717 
4718     SDValue Result =
4719       getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4720                       Align, isVol, DstPtrInfo);
4721 
4722     if (Result.getNode())
4723       return Result;
4724   }
4725 
4726   // Then check to see if we should lower the memset with target-specific
4727   // code. If the target chooses to do this, this is the next best.
4728   if (TSI) {
4729     SDValue Result = TSI->EmitTargetCodeForMemset(
4730         *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
4731     if (Result.getNode())
4732       return Result;
4733   }
4734 
4735   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4736 
4737   // Emit a library call.
4738   Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
4739   TargetLowering::ArgListTy Args;
4740   TargetLowering::ArgListEntry Entry;
4741   Entry.Node = Dst; Entry.Ty = IntPtrTy;
4742   Args.push_back(Entry);
4743   Entry.Node = Src;
4744   Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
4745   Args.push_back(Entry);
4746   Entry.Node = Size;
4747   Entry.Ty = IntPtrTy;
4748   Args.push_back(Entry);
4749 
4750   // FIXME: pass in SDLoc
4751   TargetLowering::CallLoweringInfo CLI(*this);
4752   CLI.setDebugLoc(dl)
4753       .setChain(Chain)
4754       .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
4755                  Type::getVoidTy(*getContext()),
4756                  getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4757                                    TLI->getPointerTy(getDataLayout())),
4758                  std::move(Args), 0)
4759       .setDiscardResult()
4760       .setTailCall(isTailCall);
4761 
4762   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4763   return CallResult.second;
4764 }
4765 
4766 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4767                                 SDVTList VTList, ArrayRef<SDValue> Ops,
4768                                 MachineMemOperand *MMO,
4769                                 AtomicOrdering SuccessOrdering,
4770                                 AtomicOrdering FailureOrdering,
4771                                 SynchronizationScope SynchScope) {
4772   FoldingSetNodeID ID;
4773   ID.AddInteger(MemVT.getRawBits());
4774   AddNodeIDNode(ID, Opcode, VTList, Ops);
4775   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4776   void* IP = nullptr;
4777   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
4778     cast<AtomicSDNode>(E)->refineAlignment(MMO);
4779     return SDValue(E, 0);
4780   }
4781 
4782   // Allocate the operands array for the node out of the BumpPtrAllocator, since
4783   // SDNode doesn't have access to it.  This memory will be "leaked" when
4784   // the node is deallocated, but recovered when the allocator is released.
4785   // If the number of operands is less than 5 we use AtomicSDNode's internal
4786   // storage.
4787   unsigned NumOps = Ops.size();
4788   SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
4789                              : nullptr;
4790 
4791   SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4792                                                dl.getDebugLoc(), VTList, MemVT,
4793                                                Ops.data(), DynOps, NumOps, MMO,
4794                                                SuccessOrdering, FailureOrdering,
4795                                                SynchScope);
4796   CSEMap.InsertNode(N, IP);
4797   InsertNode(N);
4798   return SDValue(N, 0);
4799 }
4800 
4801 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4802                                 SDVTList VTList, ArrayRef<SDValue> Ops,
4803                                 MachineMemOperand *MMO,
4804                                 AtomicOrdering Ordering,
4805                                 SynchronizationScope SynchScope) {
4806   return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
4807                    Ordering, SynchScope);
4808 }
4809 
4810 SDValue SelectionDAG::getAtomicCmpSwap(
4811     unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
4812     SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
4813     unsigned Alignment, AtomicOrdering SuccessOrdering,
4814     AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
4815   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4816          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4817   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4818 
4819   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4820     Alignment = getEVTAlignment(MemVT);
4821 
4822   MachineFunction &MF = getMachineFunction();
4823 
4824   // FIXME: Volatile isn't really correct; we should keep track of atomic
4825   // orderings in the memoperand.
4826   unsigned Flags = MachineMemOperand::MOVolatile;
4827   Flags |= MachineMemOperand::MOLoad;
4828   Flags |= MachineMemOperand::MOStore;
4829 
4830   MachineMemOperand *MMO =
4831     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4832 
4833   return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
4834                           SuccessOrdering, FailureOrdering, SynchScope);
4835 }
4836 
4837 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
4838                                        SDVTList VTs, SDValue Chain, SDValue Ptr,
4839                                        SDValue Cmp, SDValue Swp,
4840                                        MachineMemOperand *MMO,
4841                                        AtomicOrdering SuccessOrdering,
4842                                        AtomicOrdering FailureOrdering,
4843                                        SynchronizationScope SynchScope) {
4844   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4845          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4846   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4847 
4848   SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4849   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
4850                    SuccessOrdering, FailureOrdering, SynchScope);
4851 }
4852 
4853 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4854                                 SDValue Chain,
4855                                 SDValue Ptr, SDValue Val,
4856                                 const Value* PtrVal,
4857                                 unsigned Alignment,
4858                                 AtomicOrdering Ordering,
4859                                 SynchronizationScope SynchScope) {
4860   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4861     Alignment = getEVTAlignment(MemVT);
4862 
4863   MachineFunction &MF = getMachineFunction();
4864   // An atomic store does not load. An atomic load does not store.
4865   // (An atomicrmw obviously both loads and stores.)
4866   // For now, atomics are always considered volatile, and they are
4867   // chained as such.
4868   // FIXME: Volatile isn't really correct; we should keep track of atomic
4869   // orderings in the memoperand.
4870   unsigned Flags = MachineMemOperand::MOVolatile;
4871   if (Opcode != ISD::ATOMIC_STORE)
4872     Flags |= MachineMemOperand::MOLoad;
4873   if (Opcode != ISD::ATOMIC_LOAD)
4874     Flags |= MachineMemOperand::MOStore;
4875 
4876   MachineMemOperand *MMO =
4877     MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4878                             MemVT.getStoreSize(), Alignment);
4879 
4880   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4881                    Ordering, SynchScope);
4882 }
4883 
4884 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4885                                 SDValue Chain,
4886                                 SDValue Ptr, SDValue Val,
4887                                 MachineMemOperand *MMO,
4888                                 AtomicOrdering Ordering,
4889                                 SynchronizationScope SynchScope) {
4890   assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4891           Opcode == ISD::ATOMIC_LOAD_SUB ||
4892           Opcode == ISD::ATOMIC_LOAD_AND ||
4893           Opcode == ISD::ATOMIC_LOAD_OR ||
4894           Opcode == ISD::ATOMIC_LOAD_XOR ||
4895           Opcode == ISD::ATOMIC_LOAD_NAND ||
4896           Opcode == ISD::ATOMIC_LOAD_MIN ||
4897           Opcode == ISD::ATOMIC_LOAD_MAX ||
4898           Opcode == ISD::ATOMIC_LOAD_UMIN ||
4899           Opcode == ISD::ATOMIC_LOAD_UMAX ||
4900           Opcode == ISD::ATOMIC_SWAP ||
4901           Opcode == ISD::ATOMIC_STORE) &&
4902          "Invalid Atomic Op");
4903 
4904   EVT VT = Val.getValueType();
4905 
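  // ATOMIC_STORE produces only a chain; all the other operations here also
  // produce the value loaded from memory.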
4906   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4907                                                getVTList(VT, MVT::Other);
4908   SDValue Ops[] = {Chain, Ptr, Val};
4909   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
4910 }
4911 
4912 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4913                                 EVT VT, SDValue Chain,
4914                                 SDValue Ptr,
4915                                 MachineMemOperand *MMO,
4916                                 AtomicOrdering Ordering,
4917                                 SynchronizationScope SynchScope) {
4918   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4919 
4920   SDVTList VTs = getVTList(VT, MVT::Other);
4921   SDValue Ops[] = {Chain, Ptr};
4922   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
4923 }
4924 
4925 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4926 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
4927   if (Ops.size() == 1)
4928     return Ops[0];
4929 
4930   SmallVector<EVT, 4> VTs;
4931   VTs.reserve(Ops.size());
4932   for (unsigned i = 0; i < Ops.size(); ++i)
4933     VTs.push_back(Ops[i].getValueType());
4934   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
4935 }
4936 
4937 SDValue
4938 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4939                                   ArrayRef<SDValue> Ops,
4940                                   EVT MemVT, MachinePointerInfo PtrInfo,
4941                                   unsigned Align, bool Vol,
4942                                   bool ReadMem, bool WriteMem, unsigned Size) {
4943   if (Align == 0)  // Ensure that codegen never sees alignment 0
4944     Align = getEVTAlignment(MemVT);
4945 
4946   MachineFunction &MF = getMachineFunction();
4947   unsigned Flags = 0;
4948   if (WriteMem)
4949     Flags |= MachineMemOperand::MOStore;
4950   if (ReadMem)
4951     Flags |= MachineMemOperand::MOLoad;
4952   if (Vol)
4953     Flags |= MachineMemOperand::MOVolatile;
4954   if (!Size)
4955     Size = MemVT.getStoreSize();
4956   MachineMemOperand *MMO =
4957     MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
4958 
4959   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
4960 }
4961 
4962 SDValue
4963 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4964                                   ArrayRef<SDValue> Ops, EVT MemVT,
4965                                   MachineMemOperand *MMO) {
4966   assert((Opcode == ISD::INTRINSIC_VOID ||
4967           Opcode == ISD::INTRINSIC_W_CHAIN ||
4968           Opcode == ISD::PREFETCH ||
4969           Opcode == ISD::LIFETIME_START ||
4970           Opcode == ISD::LIFETIME_END ||
4971           (Opcode <= INT_MAX &&
4972            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4973          "Opcode is not a memory-accessing opcode!");
4974 
4975   // Memoize the node unless it returns a flag.
4976   MemIntrinsicSDNode *N;
4977   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4978     FoldingSetNodeID ID;
4979     AddNodeIDNode(ID, Opcode, VTList, Ops);
4980     ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4981     void *IP = nullptr;
4982     if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
4983       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4984       return SDValue(E, 0);
4985     }
4986 
4987     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4988                                                dl.getDebugLoc(), VTList, Ops,
4989                                                MemVT, MMO);
4990     CSEMap.InsertNode(N, IP);
4991   } else {
4992     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4993                                                dl.getDebugLoc(), VTList, Ops,
4994                                                MemVT, MMO);
4995   }
4996   InsertNode(N);
4997   return SDValue(N, 0);
4998 }
4999 
5000 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5001 /// MachinePointerInfo record from it.  This is particularly useful because the
5002 /// code generator has many cases where it doesn't bother passing in a
5003 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
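/// For example, a pointer of the form (add FrameIndex<1>, 8) is modeled as
/// frame index #1 at offset 8.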
5004 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
5005                                            int64_t Offset = 0) {
5006   // If this is FI+Offset, we can model it.
5007   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
5008     return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
5009                                              FI->getIndex(), Offset);
5010 
5011   // If this is (FI+Offset1)+Offset2, we can model it.
5012   if (Ptr.getOpcode() != ISD::ADD ||
5013       !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
5014       !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
5015     return MachinePointerInfo();
5016 
5017   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5018   return MachinePointerInfo::getFixedStack(
5019       DAG.getMachineFunction(), FI,
5020       Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
5021 }
5022 
5023 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5024 /// MachinePointerInfo record from it.  This is particularly useful because the
5025 /// code generator has many cases where it doesn't bother passing in a
5026 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5027 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
5028                                            SDValue OffsetOp) {
5029   // If the 'Offset' value isn't a constant, we can't handle this.
5030   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
5031     return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue());
5032   if (OffsetOp.getOpcode() == ISD::UNDEF)
5033     return InferPointerInfo(DAG, Ptr);
5034   return MachinePointerInfo();
5035 }
5036 
5037 
5038 SDValue
5039 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5040                       EVT VT, SDLoc dl, SDValue Chain,
5041                       SDValue Ptr, SDValue Offset,
5042                       MachinePointerInfo PtrInfo, EVT MemVT,
5043                       bool isVolatile, bool isNonTemporal, bool isInvariant,
5044                       unsigned Alignment, const AAMDNodes &AAInfo,
5045                       const MDNode *Ranges) {
5046   assert(Chain.getValueType() == MVT::Other &&
5047         "Invalid chain type");
5048   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
5049     Alignment = getEVTAlignment(VT);
5050 
5051   unsigned Flags = MachineMemOperand::MOLoad;
5052   if (isVolatile)
5053     Flags |= MachineMemOperand::MOVolatile;
5054   if (isNonTemporal)
5055     Flags |= MachineMemOperand::MONonTemporal;
5056   if (isInvariant)
5057     Flags |= MachineMemOperand::MOInvariant;
5058 
5059   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
5060   // clients.
5061   if (PtrInfo.V.isNull())
5062     PtrInfo = InferPointerInfo(*this, Ptr, Offset);
5063 
5064   MachineFunction &MF = getMachineFunction();
5065   MachineMemOperand *MMO =
5066     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
5067                             AAInfo, Ranges);
5068   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
5069 }
5070 
5071 SDValue
5072 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5073                       EVT VT, SDLoc dl, SDValue Chain,
5074                       SDValue Ptr, SDValue Offset, EVT MemVT,
5075                       MachineMemOperand *MMO) {
5076   if (VT == MemVT) {
5077     ExtType = ISD::NON_EXTLOAD;
5078   } else if (ExtType == ISD::NON_EXTLOAD) {
5079     assert(VT == MemVT && "Non-extending load from different memory type!");
5080   } else {
5081     // Extending load.
5082     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
5083            "Should only be an extending load, not truncating!");
5084     assert(VT.isInteger() == MemVT.isInteger() &&
5085            "Cannot convert from FP to Int or Int -> FP!");
5086     assert(VT.isVector() == MemVT.isVector() &&
5087            "Cannot use an ext load to convert to or from a vector!");
5088     assert((!VT.isVector() ||
5089             VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
5090            "Cannot use an ext load to change the number of vector elements!");
5091   }
5092 
5093   bool Indexed = AM != ISD::UNINDEXED;
5094   assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
5095          "Unindexed load with an offset!");
5096 
5097   SDVTList VTs = Indexed ?
5098     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5099   SDValue Ops[] = { Chain, Ptr, Offset };
5100   FoldingSetNodeID ID;
5101   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5102   ID.AddInteger(MemVT.getRawBits());
5103   ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
5104                                      MMO->isNonTemporal(),
5105                                      MMO->isInvariant()));
5106   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5107   void *IP = nullptr;
5108   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5109     cast<LoadSDNode>(E)->refineAlignment(MMO);
5110     return SDValue(E, 0);
5111   }
5112   SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
5113                                              dl.getDebugLoc(), VTs, AM, ExtType,
5114                                              MemVT, MMO);
5115   CSEMap.InsertNode(N, IP);
5116   InsertNode(N);
5117   return SDValue(N, 0);
5118 }
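// Note on CSE (an illustrative sketch; 'DAG', 'dl', 'Chain', 'Ptr' and a
// MachineMemOperand 'MMO' are assumed to be in scope): because loads are
// memoized through the FoldingSetNodeID above, requesting the same load twice
// should hand back the existing node rather than allocate a duplicate.
//
//   SDValue A = DAG.getLoad(MVT::i32, dl, Chain, Ptr, MMO);
//   SDValue B = DAG.getLoad(MVT::i32, dl, Chain, Ptr, MMO);
//   assert(A.getNode() == B.getNode() && "identical loads are CSE'd");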
5119 
getLoad(EVT VT,SDLoc dl,SDValue Chain,SDValue Ptr,MachinePointerInfo PtrInfo,bool isVolatile,bool isNonTemporal,bool isInvariant,unsigned Alignment,const AAMDNodes & AAInfo,const MDNode * Ranges)5120 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
5121                               SDValue Chain, SDValue Ptr,
5122                               MachinePointerInfo PtrInfo,
5123                               bool isVolatile, bool isNonTemporal,
5124                               bool isInvariant, unsigned Alignment,
5125                               const AAMDNodes &AAInfo,
5126                               const MDNode *Ranges) {
5127   SDValue Undef = getUNDEF(Ptr.getValueType());
5128   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5129                  PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
5130                  AAInfo, Ranges);
5131 }
5132 
getLoad(EVT VT,SDLoc dl,SDValue Chain,SDValue Ptr,MachineMemOperand * MMO)5133 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
5134                               SDValue Chain, SDValue Ptr,
5135                               MachineMemOperand *MMO) {
5136   SDValue Undef = getUNDEF(Ptr.getValueType());
5137   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5138                  VT, MMO);
5139 }
5140 
getExtLoad(ISD::LoadExtType ExtType,SDLoc dl,EVT VT,SDValue Chain,SDValue Ptr,MachinePointerInfo PtrInfo,EVT MemVT,bool isVolatile,bool isNonTemporal,bool isInvariant,unsigned Alignment,const AAMDNodes & AAInfo)5141 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
5142                                  SDValue Chain, SDValue Ptr,
5143                                  MachinePointerInfo PtrInfo, EVT MemVT,
5144                                  bool isVolatile, bool isNonTemporal,
5145                                  bool isInvariant, unsigned Alignment,
5146                                  const AAMDNodes &AAInfo) {
5147   SDValue Undef = getUNDEF(Ptr.getValueType());
5148   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5149                  PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
5150                  Alignment, AAInfo);
5151 }
5152 
5153 
getExtLoad(ISD::LoadExtType ExtType,SDLoc dl,EVT VT,SDValue Chain,SDValue Ptr,EVT MemVT,MachineMemOperand * MMO)5154 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
5155                                  SDValue Chain, SDValue Ptr, EVT MemVT,
5156                                  MachineMemOperand *MMO) {
5157   SDValue Undef = getUNDEF(Ptr.getValueType());
5158   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5159                  MemVT, MMO);
5160 }
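// Example (illustrative; 'DAG', 'dl', 'Chain', a pointer value 'Ptr' and a
// MachinePointerInfo 'PtrInfo' are assumed): widen an i8 in memory to an i32
// register value with a sign-extending load.
//
//   SDValue Ext = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i32, Chain, Ptr,
//                                PtrInfo, MVT::i8,
//                                /*isVolatile=*/false, /*isNonTemporal=*/false,
//                                /*isInvariant=*/false, /*Alignment=*/1,
//                                AAMDNodes());
//   SDValue NewChain = Ext.getValue(1);   // the load's output chain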
5161 
5162 SDValue
getIndexedLoad(SDValue OrigLoad,SDLoc dl,SDValue Base,SDValue Offset,ISD::MemIndexedMode AM)5163 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
5164                              SDValue Offset, ISD::MemIndexedMode AM) {
5165   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5166   assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
5167          "Load is already a indexed load!");
5168   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
5169                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
5170                  LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
5171                  false, LD->getAlignment());
5172 }
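// Example (illustrative sketch of how the DAG combiner uses this after a
// target's getPreIndexedAddressParts hook succeeds; 'DAG', an existing
// unindexed LoadSDNode 'LD' and an increment value 'Inc' are assumed):
//
//   SDValue Base  = LD->getBasePtr();
//   SDValue NewLd = DAG.getIndexedLoad(SDValue(LD, 0), SDLoc(LD), Base, Inc,
//                                      ISD::PRE_INC);
//   // NewLd has three results: the loaded value, the updated base pointer,
//   // and the output chain.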
5173 
getStore(SDValue Chain,SDLoc dl,SDValue Val,SDValue Ptr,MachinePointerInfo PtrInfo,bool isVolatile,bool isNonTemporal,unsigned Alignment,const AAMDNodes & AAInfo)5174 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
5175                                SDValue Ptr, MachinePointerInfo PtrInfo,
5176                                bool isVolatile, bool isNonTemporal,
5177                                unsigned Alignment, const AAMDNodes &AAInfo) {
5178   assert(Chain.getValueType() == MVT::Other &&
5179         "Invalid chain type");
5180   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
5181     Alignment = getEVTAlignment(Val.getValueType());
5182 
5183   unsigned Flags = MachineMemOperand::MOStore;
5184   if (isVolatile)
5185     Flags |= MachineMemOperand::MOVolatile;
5186   if (isNonTemporal)
5187     Flags |= MachineMemOperand::MONonTemporal;
5188 
5189   if (PtrInfo.V.isNull())
5190     PtrInfo = InferPointerInfo(*this, Ptr);
5191 
5192   MachineFunction &MF = getMachineFunction();
5193   MachineMemOperand *MMO =
5194     MF.getMachineMemOperand(PtrInfo, Flags,
5195                             Val.getValueType().getStoreSize(), Alignment,
5196                             AAInfo);
5197 
5198   return getStore(Chain, dl, Val, Ptr, MMO);
5199 }
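// Example (illustrative; 'DAG', 'dl', 'Chain', a value 'Val' and a stack slot
// with frame index 'FrameIdx' reachable through the address 'FIN' are
// assumed): spill a value to a fixed stack slot.
//
//   SDValue St = DAG.getStore(Chain, dl, Val, FIN,
//                             MachinePointerInfo::getFixedStack(
//                                 DAG.getMachineFunction(), FrameIdx, 0),
//                             /*isVolatile=*/false, /*isNonTemporal=*/false,
//                             /*Alignment=*/4, AAMDNodes());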
5200 
getStore(SDValue Chain,SDLoc dl,SDValue Val,SDValue Ptr,MachineMemOperand * MMO)5201 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
5202                                SDValue Ptr, MachineMemOperand *MMO) {
5203   assert(Chain.getValueType() == MVT::Other &&
5204         "Invalid chain type");
5205   EVT VT = Val.getValueType();
5206   SDVTList VTs = getVTList(MVT::Other);
5207   SDValue Undef = getUNDEF(Ptr.getValueType());
5208   SDValue Ops[] = { Chain, Val, Ptr, Undef };
5209   FoldingSetNodeID ID;
5210   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5211   ID.AddInteger(VT.getRawBits());
5212   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
5213                                      MMO->isNonTemporal(), MMO->isInvariant()));
5214   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5215   void *IP = nullptr;
5216   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5217     cast<StoreSDNode>(E)->refineAlignment(MMO);
5218     return SDValue(E, 0);
5219   }
5220   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
5221                                               dl.getDebugLoc(), VTs,
5222                                               ISD::UNINDEXED, false, VT, MMO);
5223   CSEMap.InsertNode(N, IP);
5224   InsertNode(N);
5225   return SDValue(N, 0);
5226 }
5227 
getTruncStore(SDValue Chain,SDLoc dl,SDValue Val,SDValue Ptr,MachinePointerInfo PtrInfo,EVT SVT,bool isVolatile,bool isNonTemporal,unsigned Alignment,const AAMDNodes & AAInfo)5228 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
5229                                     SDValue Ptr, MachinePointerInfo PtrInfo,
5230                                     EVT SVT, bool isVolatile, bool isNonTemporal,
5231                                     unsigned Alignment,
5232                                     const AAMDNodes &AAInfo) {
5233   assert(Chain.getValueType() == MVT::Other &&
5234         "Invalid chain type");
5235   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
5236     Alignment = getEVTAlignment(SVT);
5237 
5238   unsigned Flags = MachineMemOperand::MOStore;
5239   if (isVolatile)
5240     Flags |= MachineMemOperand::MOVolatile;
5241   if (isNonTemporal)
5242     Flags |= MachineMemOperand::MONonTemporal;
5243 
5244   if (PtrInfo.V.isNull())
5245     PtrInfo = InferPointerInfo(*this, Ptr);
5246 
5247   MachineFunction &MF = getMachineFunction();
5248   MachineMemOperand *MMO =
5249     MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
5250                             AAInfo);
5251 
5252   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
5253 }
5254 
getTruncStore(SDValue Chain,SDLoc dl,SDValue Val,SDValue Ptr,EVT SVT,MachineMemOperand * MMO)5255 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
5256                                     SDValue Ptr, EVT SVT,
5257                                     MachineMemOperand *MMO) {
5258   EVT VT = Val.getValueType();
5259 
5260   assert(Chain.getValueType() == MVT::Other &&
5261         "Invalid chain type");
5262   if (VT == SVT)
5263     return getStore(Chain, dl, Val, Ptr, MMO);
5264 
5265   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
5266          "Should only be a truncating store, not extending!");
5267   assert(VT.isInteger() == SVT.isInteger() &&
5268          "Can't do FP-INT conversion!");
5269   assert(VT.isVector() == SVT.isVector() &&
5270          "Cannot use trunc store to convert to or from a vector!");
5271   assert((!VT.isVector() ||
5272           VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
5273          "Cannot use trunc store to change the number of vector elements!");
5274 
5275   SDVTList VTs = getVTList(MVT::Other);
5276   SDValue Undef = getUNDEF(Ptr.getValueType());
5277   SDValue Ops[] = { Chain, Val, Ptr, Undef };
5278   FoldingSetNodeID ID;
5279   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5280   ID.AddInteger(SVT.getRawBits());
5281   ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
5282                                      MMO->isNonTemporal(), MMO->isInvariant()));
5283   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5284   void *IP = nullptr;
5285   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5286     cast<StoreSDNode>(E)->refineAlignment(MMO);
5287     return SDValue(E, 0);
5288   }
5289   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
5290                                               dl.getDebugLoc(), VTs,
5291                                               ISD::UNINDEXED, true, SVT, MMO);
5292   CSEMap.InsertNode(N, IP);
5293   InsertNode(N);
5294   return SDValue(N, 0);
5295 }
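// Example (illustrative; 'DAG', 'dl', 'Chain', an i32 value 'Val', a pointer
// 'Ptr' and a MachinePointerInfo 'PtrInfo' are assumed): store only the low
// 8 bits of an i32 value, using the PtrInfo convenience overload above.  The
// memory type must be strictly narrower than the value type, per the asserts.
//
//   SDValue St = DAG.getTruncStore(Chain, dl, Val, Ptr, PtrInfo, MVT::i8,
//                                  /*isVolatile=*/false,
//                                  /*isNonTemporal=*/false,
//                                  /*Alignment=*/1, AAMDNodes());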
5296 
5297 SDValue
getIndexedStore(SDValue OrigStore,SDLoc dl,SDValue Base,SDValue Offset,ISD::MemIndexedMode AM)5298 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
5299                               SDValue Offset, ISD::MemIndexedMode AM) {
5300   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
5301   assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
5302          "Store is already a indexed store!");
5303   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
5304   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
5305   FoldingSetNodeID ID;
5306   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5307   ID.AddInteger(ST->getMemoryVT().getRawBits());
5308   ID.AddInteger(ST->getRawSubclassData());
5309   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
5310   void *IP = nullptr;
5311   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
5312     return SDValue(E, 0);
5313 
5314   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
5315                                               dl.getDebugLoc(), VTs, AM,
5316                                               ST->isTruncatingStore(),
5317                                               ST->getMemoryVT(),
5318                                               ST->getMemOperand());
5319   CSEMap.InsertNode(N, IP);
5320   InsertNode(N);
5321   return SDValue(N, 0);
5322 }
5323 
5324 SDValue
getMaskedLoad(EVT VT,SDLoc dl,SDValue Chain,SDValue Ptr,SDValue Mask,SDValue Src0,EVT MemVT,MachineMemOperand * MMO,ISD::LoadExtType ExtTy)5325 SelectionDAG::getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain,
5326                             SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT,
5327                             MachineMemOperand *MMO, ISD::LoadExtType ExtTy) {
5328 
5329   SDVTList VTs = getVTList(VT, MVT::Other);
5330   SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
5331   FoldingSetNodeID ID;
5332   AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
5333   ID.AddInteger(VT.getRawBits());
5334   ID.AddInteger(encodeMemSDNodeFlags(ExtTy, ISD::UNINDEXED,
5335                                      MMO->isVolatile(),
5336                                      MMO->isNonTemporal(),
5337                                      MMO->isInvariant()));
5338   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5339   void *IP = nullptr;
5340   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5341     cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
5342     return SDValue(E, 0);
5343   }
5344   SDNode *N = new (NodeAllocator) MaskedLoadSDNode(dl.getIROrder(),
5345                                              dl.getDebugLoc(), Ops, 4, VTs,
5346                                              ExtTy, MemVT, MMO);
5347   CSEMap.InsertNode(N, IP);
5348   InsertNode(N);
5349   return SDValue(N, 0);
5350 }
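// Example (illustrative sketch of how a masked-load client might call this;
// 'DAG', 'dl', 'Chain', 'Ptr', a v8i1 mask 'Mask', a v8i32 pass-through
// 'Src0' and a suitable MachineMemOperand 'MMO' are all assumed):
//
//   SDValue MLd = DAG.getMaskedLoad(MVT::v8i32, dl, Chain, Ptr, Mask, Src0,
//                                   MVT::v8i32, MMO, ISD::NON_EXTLOAD);
//   SDValue NewChain = MLd.getValue(1);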
5351 
getMaskedStore(SDValue Chain,SDLoc dl,SDValue Val,SDValue Ptr,SDValue Mask,EVT MemVT,MachineMemOperand * MMO,bool isTrunc)5352 SDValue SelectionDAG::getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
5353                                      SDValue Ptr, SDValue Mask, EVT MemVT,
5354                                      MachineMemOperand *MMO, bool isTrunc) {
5355   assert(Chain.getValueType() == MVT::Other &&
5356         "Invalid chain type");
5357   EVT VT = Val.getValueType();
5358   SDVTList VTs = getVTList(MVT::Other);
5359   SDValue Ops[] = { Chain, Ptr, Mask, Val };
5360   FoldingSetNodeID ID;
5361   AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
5362   ID.AddInteger(VT.getRawBits());
5363   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
5364                                      MMO->isNonTemporal(), MMO->isInvariant()));
5365   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5366   void *IP = nullptr;
5367   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5368     cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
5369     return SDValue(E, 0);
5370   }
5371   SDNode *N = new (NodeAllocator) MaskedStoreSDNode(dl.getIROrder(),
5372                                                     dl.getDebugLoc(), Ops, 4,
5373                                                     VTs, isTrunc, MemVT, MMO);
5374   CSEMap.InsertNode(N, IP);
5375   InsertNode(N);
5376   return SDValue(N, 0);
5377 }
5378 
5379 SDValue
getMaskedGather(SDVTList VTs,EVT VT,SDLoc dl,ArrayRef<SDValue> Ops,MachineMemOperand * MMO)5380 SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl,
5381                               ArrayRef<SDValue> Ops,
5382                               MachineMemOperand *MMO) {
5383 
5384   FoldingSetNodeID ID;
5385   AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
5386   ID.AddInteger(VT.getRawBits());
5387   ID.AddInteger(encodeMemSDNodeFlags(ISD::NON_EXTLOAD, ISD::UNINDEXED,
5388                                      MMO->isVolatile(),
5389                                      MMO->isNonTemporal(),
5390                                      MMO->isInvariant()));
5391   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5392   void *IP = nullptr;
5393   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5394     cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
5395     return SDValue(E, 0);
5396   }
5397   MaskedGatherSDNode *N =
5398     new (NodeAllocator) MaskedGatherSDNode(dl.getIROrder(), dl.getDebugLoc(),
5399                                            Ops, VTs, VT, MMO);
5400   CSEMap.InsertNode(N, IP);
5401   InsertNode(N);
5402   return SDValue(N, 0);
5403 }
5404 
getMaskedScatter(SDVTList VTs,EVT VT,SDLoc dl,ArrayRef<SDValue> Ops,MachineMemOperand * MMO)5405 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl,
5406                                        ArrayRef<SDValue> Ops,
5407                                        MachineMemOperand *MMO) {
5408   FoldingSetNodeID ID;
5409   AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
5410   ID.AddInteger(VT.getRawBits());
5411   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
5412                                      MMO->isNonTemporal(),
5413                                      MMO->isInvariant()));
5414   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5415   void *IP = nullptr;
5416   if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
5417     cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
5418     return SDValue(E, 0);
5419   }
5420   SDNode *N =
5421     new (NodeAllocator) MaskedScatterSDNode(dl.getIROrder(), dl.getDebugLoc(),
5422                                             Ops, VTs, VT, MMO);
5423   CSEMap.InsertNode(N, IP);
5424   InsertNode(N);
5425   return SDValue(N, 0);
5426 }
5427 
getVAArg(EVT VT,SDLoc dl,SDValue Chain,SDValue Ptr,SDValue SV,unsigned Align)5428 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
5429                                SDValue Chain, SDValue Ptr,
5430                                SDValue SV,
5431                                unsigned Align) {
5432   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
5433   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
5434 }
5435 
getNode(unsigned Opcode,SDLoc DL,EVT VT,ArrayRef<SDUse> Ops)5436 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
5437                               ArrayRef<SDUse> Ops) {
5438   switch (Ops.size()) {
5439   case 0: return getNode(Opcode, DL, VT);
5440   case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
5441   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
5442   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5443   default: break;
5444   }
5445 
5446   // Copy from an SDUse array into an SDValue array for use with
5447   // the regular getNode logic.
5448   SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
5449   return getNode(Opcode, DL, VT, NewOps);
5450 }
5451 
getNode(unsigned Opcode,SDLoc DL,EVT VT,ArrayRef<SDValue> Ops,const SDNodeFlags * Flags)5452 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
5453                               ArrayRef<SDValue> Ops, const SDNodeFlags *Flags) {
5454   unsigned NumOps = Ops.size();
5455   switch (NumOps) {
5456   case 0: return getNode(Opcode, DL, VT);
5457   case 1: return getNode(Opcode, DL, VT, Ops[0]);
5458   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
5459   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5460   default: break;
5461   }
5462 
5463   switch (Opcode) {
5464   default: break;
5465   case ISD::SELECT_CC: {
5466     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
5467     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
5468            "LHS and RHS of condition must have same type!");
5469     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5470            "True and False arms of SelectCC must have same type!");
5471     assert(Ops[2].getValueType() == VT &&
5472            "select_cc node must be of same type as true and false value!");
5473     break;
5474   }
5475   case ISD::BR_CC: {
5476     assert(NumOps == 5 && "BR_CC takes 5 operands!");
5477     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5478            "LHS/RHS of comparison should match types!");
5479     break;
5480   }
5481   }
5482 
5483   // Memoize nodes.
5484   SDNode *N;
5485   SDVTList VTs = getVTList(VT);
5486 
5487   if (VT != MVT::Glue) {
5488     FoldingSetNodeID ID;
5489     AddNodeIDNode(ID, Opcode, VTs, Ops);
5490     void *IP = nullptr;
5491 
5492     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
5493       return SDValue(E, 0);
5494 
5495     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5496                                    VTs, Ops);
5497     CSEMap.InsertNode(N, IP);
5498   } else {
5499     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5500                                    VTs, Ops);
5501   }
5502 
5503   InsertNode(N);
5504   return SDValue(N, 0);
5505 }
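// Example (illustrative; 'DAG', 'dl' and i32 values 'LHS', 'RHS', 'TVal',
// 'FVal' are assumed): a five-operand SELECT_CC that satisfies the operand
// type checks asserted above.
//
//   SDValue Ops[] = { LHS, RHS, TVal, FVal, DAG.getCondCode(ISD::SETLT) };
//   SDValue Sel = DAG.getNode(ISD::SELECT_CC, dl, MVT::i32, Ops);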
5506 
getNode(unsigned Opcode,SDLoc DL,ArrayRef<EVT> ResultTys,ArrayRef<SDValue> Ops)5507 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
5508                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
5509   return getNode(Opcode, DL, getVTList(ResultTys), Ops);
5510 }
5511 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList,ArrayRef<SDValue> Ops)5512 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5513                               ArrayRef<SDValue> Ops) {
5514   if (VTList.NumVTs == 1)
5515     return getNode(Opcode, DL, VTList.VTs[0], Ops);
5516 
5517 #if 0
5518   switch (Opcode) {
5519   // FIXME: figure out how to safely handle things like
5520   // int foo(int x) { return 1 << (x & 255); }
5521   // int bar() { return foo(256); }
5522   case ISD::SRA_PARTS:
5523   case ISD::SRL_PARTS:
5524   case ISD::SHL_PARTS:
5525     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5526         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5527       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5528     else if (N3.getOpcode() == ISD::AND)
5529       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5530         // If the and is only masking out bits that cannot affect the shift,
5531         // eliminate the and.
5532         unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
5533         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5534           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5535       }
5536     break;
5537   }
5538 #endif
5539 
5540   // Memoize the node unless it returns a flag.
5541   SDNode *N;
5542   unsigned NumOps = Ops.size();
5543   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5544     FoldingSetNodeID ID;
5545     AddNodeIDNode(ID, Opcode, VTList, Ops);
5546     void *IP = nullptr;
5547     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
5548       return SDValue(E, 0);
5549 
5550     if (NumOps == 1) {
5551       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5552                                           DL.getDebugLoc(), VTList, Ops[0]);
5553     } else if (NumOps == 2) {
5554       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5555                                            DL.getDebugLoc(), VTList, Ops[0],
5556                                            Ops[1]);
5557     } else if (NumOps == 3) {
5558       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5559                                             DL.getDebugLoc(), VTList, Ops[0],
5560                                             Ops[1], Ops[2]);
5561     } else {
5562       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5563                                      VTList, Ops);
5564     }
5565     CSEMap.InsertNode(N, IP);
5566   } else {
5567     if (NumOps == 1) {
5568       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5569                                           DL.getDebugLoc(), VTList, Ops[0]);
5570     } else if (NumOps == 2) {
5571       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5572                                            DL.getDebugLoc(), VTList, Ops[0],
5573                                            Ops[1]);
5574     } else if (NumOps == 3) {
5575       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5576                                             DL.getDebugLoc(), VTList, Ops[0],
5577                                             Ops[1], Ops[2]);
5578     } else {
5579       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5580                                      VTList, Ops);
5581     }
5582   }
5583   InsertNode(N);
5584   return SDValue(N, 0);
5585 }
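// Example (illustrative; 'DAG', 'dl' and i32 values 'LHS', 'RHS' are assumed):
// a node with more than one result is built with an SDVTList, and each result
// is then addressed through getValue().
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
//   SDValue DivRem = DAG.getNode(ISD::UDIVREM, dl, VTs, LHS, RHS);
//   SDValue Quot = DivRem.getValue(0);
//   SDValue Rem  = DivRem.getValue(1);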
5586 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList)5587 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
5588   return getNode(Opcode, DL, VTList, None);
5589 }
5590 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList,SDValue N1)5591 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5592                               SDValue N1) {
5593   SDValue Ops[] = { N1 };
5594   return getNode(Opcode, DL, VTList, Ops);
5595 }
5596 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList,SDValue N1,SDValue N2)5597 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5598                               SDValue N1, SDValue N2) {
5599   SDValue Ops[] = { N1, N2 };
5600   return getNode(Opcode, DL, VTList, Ops);
5601 }
5602 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList,SDValue N1,SDValue N2,SDValue N3)5603 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5604                               SDValue N1, SDValue N2, SDValue N3) {
5605   SDValue Ops[] = { N1, N2, N3 };
5606   return getNode(Opcode, DL, VTList, Ops);
5607 }
5608 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList,SDValue N1,SDValue N2,SDValue N3,SDValue N4)5609 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5610                               SDValue N1, SDValue N2, SDValue N3,
5611                               SDValue N4) {
5612   SDValue Ops[] = { N1, N2, N3, N4 };
5613   return getNode(Opcode, DL, VTList, Ops);
5614 }
5615 
getNode(unsigned Opcode,SDLoc DL,SDVTList VTList,SDValue N1,SDValue N2,SDValue N3,SDValue N4,SDValue N5)5616 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5617                               SDValue N1, SDValue N2, SDValue N3,
5618                               SDValue N4, SDValue N5) {
5619   SDValue Ops[] = { N1, N2, N3, N4, N5 };
5620   return getNode(Opcode, DL, VTList, Ops);
5621 }
5622 
getVTList(EVT VT)5623 SDVTList SelectionDAG::getVTList(EVT VT) {
5624   return makeVTList(SDNode::getValueTypeList(VT), 1);
5625 }
5626 
getVTList(EVT VT1,EVT VT2)5627 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5628   FoldingSetNodeID ID;
5629   ID.AddInteger(2U);
5630   ID.AddInteger(VT1.getRawBits());
5631   ID.AddInteger(VT2.getRawBits());
5632 
5633   void *IP = nullptr;
5634   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5635   if (!Result) {
5636     EVT *Array = Allocator.Allocate<EVT>(2);
5637     Array[0] = VT1;
5638     Array[1] = VT2;
5639     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5640     VTListMap.InsertNode(Result, IP);
5641   }
5642   return Result->getSDVTList();
5643 }
5644 
getVTList(EVT VT1,EVT VT2,EVT VT3)5645 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5646   FoldingSetNodeID ID;
5647   ID.AddInteger(3U);
5648   ID.AddInteger(VT1.getRawBits());
5649   ID.AddInteger(VT2.getRawBits());
5650   ID.AddInteger(VT3.getRawBits());
5651 
5652   void *IP = nullptr;
5653   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5654   if (!Result) {
5655     EVT *Array = Allocator.Allocate<EVT>(3);
5656     Array[0] = VT1;
5657     Array[1] = VT2;
5658     Array[2] = VT3;
5659     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5660     VTListMap.InsertNode(Result, IP);
5661   }
5662   return Result->getSDVTList();
5663 }
5664 
getVTList(EVT VT1,EVT VT2,EVT VT3,EVT VT4)5665 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5666   FoldingSetNodeID ID;
5667   ID.AddInteger(4U);
5668   ID.AddInteger(VT1.getRawBits());
5669   ID.AddInteger(VT2.getRawBits());
5670   ID.AddInteger(VT3.getRawBits());
5671   ID.AddInteger(VT4.getRawBits());
5672 
5673   void *IP = nullptr;
5674   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5675   if (!Result) {
5676     EVT *Array = Allocator.Allocate<EVT>(4);
5677     Array[0] = VT1;
5678     Array[1] = VT2;
5679     Array[2] = VT3;
5680     Array[3] = VT4;
5681     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5682     VTListMap.InsertNode(Result, IP);
5683   }
5684   return Result->getSDVTList();
5685 }
5686 
getVTList(ArrayRef<EVT> VTs)5687 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5688   unsigned NumVTs = VTs.size();
5689   FoldingSetNodeID ID;
5690   ID.AddInteger(NumVTs);
5691   for (unsigned index = 0; index < NumVTs; index++) {
5692     ID.AddInteger(VTs[index].getRawBits());
5693   }
5694 
5695   void *IP = nullptr;
5696   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5697   if (!Result) {
5698     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5699     std::copy(VTs.begin(), VTs.end(), Array);
5700     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5701     VTListMap.InsertNode(Result, IP);
5702   }
5703   return Result->getSDVTList();
5704 }
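// Note (illustrative sketch): VT lists are interned in VTListMap, so repeated
// queries hand back the same underlying array and can be compared cheaply.
//
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "identical VT lists share one interned array");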
5705 
5706 
5707 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5708 /// specified operands.  If the resultant node already exists in the DAG,
5709 /// this does not modify the specified node, instead it returns the node that
5710 /// already exists.  If the resultant node does not exist in the DAG, the
5711 /// input node is returned.  As a degenerate case, if you specify the same
5712 /// input operands as the node already has, the input node is returned.
UpdateNodeOperands(SDNode * N,SDValue Op)5713 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5714   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5715 
5716   // Check to see if there is no change.
5717   if (Op == N->getOperand(0)) return N;
5718 
5719   // See if the modified node already exists.
5720   void *InsertPos = nullptr;
5721   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5722     return Existing;
5723 
5724   // Nope it doesn't.  Remove the node from its current place in the maps.
5725   if (InsertPos)
5726     if (!RemoveNodeFromCSEMaps(N))
5727       InsertPos = nullptr;
5728 
5729   // Now we update the operands.
5730   N->OperandList[0].set(Op);
5731 
5732   // If this gets put into a CSE map, add it.
5733   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5734   return N;
5735 }
5736 
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2)5737 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5738   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5739 
5740   // Check to see if there is no change.
5741   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5742     return N;   // No operands changed, just return the input node.
5743 
5744   // See if the modified node already exists.
5745   void *InsertPos = nullptr;
5746   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5747     return Existing;
5748 
5749   // Nope it doesn't.  Remove the node from its current place in the maps.
5750   if (InsertPos)
5751     if (!RemoveNodeFromCSEMaps(N))
5752       InsertPos = nullptr;
5753 
5754   // Now we update the operands.
5755   if (N->OperandList[0] != Op1)
5756     N->OperandList[0].set(Op1);
5757   if (N->OperandList[1] != Op2)
5758     N->OperandList[1].set(Op2);
5759 
5760   // If this gets put into a CSE map, add it.
5761   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5762   return N;
5763 }
5764 
5765 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3)5766 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5767   SDValue Ops[] = { Op1, Op2, Op3 };
5768   return UpdateNodeOperands(N, Ops);
5769 }
5770 
5771 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3,SDValue Op4)5772 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5773                    SDValue Op3, SDValue Op4) {
5774   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5775   return UpdateNodeOperands(N, Ops);
5776 }
5777 
5778 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3,SDValue Op4,SDValue Op5)5779 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5780                    SDValue Op3, SDValue Op4, SDValue Op5) {
5781   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5782   return UpdateNodeOperands(N, Ops);
5783 }
5784 
5785 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,ArrayRef<SDValue> Ops)5786 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
5787   unsigned NumOps = Ops.size();
5788   assert(N->getNumOperands() == NumOps &&
5789          "Update with wrong number of operands");
5790 
5791   // If no operands changed just return the input node.
5792   if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
5793     return N;
5794 
5795   // See if the modified node already exists.
5796   void *InsertPos = nullptr;
5797   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
5798     return Existing;
5799 
5800   // Nope it doesn't.  Remove the node from its current place in the maps.
5801   if (InsertPos)
5802     if (!RemoveNodeFromCSEMaps(N))
5803       InsertPos = nullptr;
5804 
5805   // Now we update the operands.
5806   for (unsigned i = 0; i != NumOps; ++i)
5807     if (N->OperandList[i] != Ops[i])
5808       N->OperandList[i].set(Ops[i]);
5809 
5810   // If this gets put into a CSE map, add it.
5811   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5812   return N;
5813 }
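// Example (illustrative; 'DAG', an existing two-operand node 'N' and
// replacement operands 'X', 'Y' are assumed): callers must use the returned
// node, which may be a different, pre-existing node found through CSE.
//
//   SDNode *Updated = DAG.UpdateNodeOperands(N, X, Y);
//   if (Updated != N) {
//     // An equivalent node already existed; 'N' itself was left untouched.
//   }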
5814 
5815 /// DropOperands - Release the operands and set this node to have
5816 /// zero operands.
DropOperands()5817 void SDNode::DropOperands() {
5818   // Unlike the code in MorphNodeTo that does this, we don't need to
5819   // watch for dead nodes here.
5820   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5821     SDUse &Use = *I++;
5822     Use.set(SDValue());
5823   }
5824 }
5825 
5826 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5827 /// machine opcode.
5828 ///
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT)5829 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5830                                    EVT VT) {
5831   SDVTList VTs = getVTList(VT);
5832   return SelectNodeTo(N, MachineOpc, VTs, None);
5833 }
5834 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1)5835 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5836                                    EVT VT, SDValue Op1) {
5837   SDVTList VTs = getVTList(VT);
5838   SDValue Ops[] = { Op1 };
5839   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5840 }
5841 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1,SDValue Op2)5842 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5843                                    EVT VT, SDValue Op1,
5844                                    SDValue Op2) {
5845   SDVTList VTs = getVTList(VT);
5846   SDValue Ops[] = { Op1, Op2 };
5847   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5848 }
5849 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1,SDValue Op2,SDValue Op3)5850 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5851                                    EVT VT, SDValue Op1,
5852                                    SDValue Op2, SDValue Op3) {
5853   SDVTList VTs = getVTList(VT);
5854   SDValue Ops[] = { Op1, Op2, Op3 };
5855   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5856 }
5857 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,ArrayRef<SDValue> Ops)5858 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5859                                    EVT VT, ArrayRef<SDValue> Ops) {
5860   SDVTList VTs = getVTList(VT);
5861   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5862 }
5863 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,ArrayRef<SDValue> Ops)5864 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5865                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
5866   SDVTList VTs = getVTList(VT1, VT2);
5867   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5868 }
5869 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2)5870 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5871                                    EVT VT1, EVT VT2) {
5872   SDVTList VTs = getVTList(VT1, VT2);
5873   return SelectNodeTo(N, MachineOpc, VTs, None);
5874 }
5875 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,EVT VT3,ArrayRef<SDValue> Ops)5876 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5877                                    EVT VT1, EVT VT2, EVT VT3,
5878                                    ArrayRef<SDValue> Ops) {
5879   SDVTList VTs = getVTList(VT1, VT2, VT3);
5880   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5881 }
5882 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,EVT VT3,EVT VT4,ArrayRef<SDValue> Ops)5883 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5884                                    EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5885                                    ArrayRef<SDValue> Ops) {
5886   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5887   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5888 }
5889 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,SDValue Op1)5890 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5891                                    EVT VT1, EVT VT2,
5892                                    SDValue Op1) {
5893   SDVTList VTs = getVTList(VT1, VT2);
5894   SDValue Ops[] = { Op1 };
5895   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5896 }
5897 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2)5898 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5899                                    EVT VT1, EVT VT2,
5900                                    SDValue Op1, SDValue Op2) {
5901   SDVTList VTs = getVTList(VT1, VT2);
5902   SDValue Ops[] = { Op1, Op2 };
5903   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5904 }
5905 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2,SDValue Op3)5906 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5907                                    EVT VT1, EVT VT2,
5908                                    SDValue Op1, SDValue Op2,
5909                                    SDValue Op3) {
5910   SDVTList VTs = getVTList(VT1, VT2);
5911   SDValue Ops[] = { Op1, Op2, Op3 };
5912   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5913 }
5914 
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2,SDValue Op3)5915 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5916                                    EVT VT1, EVT VT2, EVT VT3,
5917                                    SDValue Op1, SDValue Op2,
5918                                    SDValue Op3) {
5919   SDVTList VTs = getVTList(VT1, VT2, VT3);
5920   SDValue Ops[] = { Op1, Op2, Op3 };
5921   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5922 }
5923 
SelectNodeTo(SDNode * N,unsigned MachineOpc,SDVTList VTs,ArrayRef<SDValue> Ops)5924 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5925                                    SDVTList VTs, ArrayRef<SDValue> Ops) {
5926   N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
5927   // Reset the NodeID to -1.
5928   N->setNodeId(-1);
5929   return N;
5930 }
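// Example (illustrative sketch of instruction selection; the target opcode
// 'MyTarget::ADDrr' is hypothetical, and 'CurDAG' and the node 'N' being
// selected are assumed to be in scope in a target's Select() routine):
//
//   SDNode *Res = CurDAG->SelectNodeTo(N, MyTarget::ADDrr, MVT::i32,
//                                      N->getOperand(0), N->getOperand(1));
//   // 'N' has been morphed in place, or replaced by an existing equivalent
//   // node; 'Res' is what callers should use from here on.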
5931 
5932 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0, throw away the line
5933 /// number information on the merged node, since it is not possible to record
5934 /// that the operation is associated with multiple source lines.  This makes
5935 /// the debugger work better at -O0, where there is a higher probability of
5936 /// other instructions being associated with that line.
5937 ///
5938 /// For IROrder, we keep the smaller of the two.
UpdadeSDLocOnMergedSDNode(SDNode * N,SDLoc OLoc)5939 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5940   DebugLoc NLoc = N->getDebugLoc();
5941   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
5942     N->setDebugLoc(DebugLoc());
5943   }
5944   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5945   N->setIROrder(Order);
5946   return N;
5947 }
5948 
5949 /// MorphNodeTo - This *mutates* the specified node to have the specified
5950 /// return type, opcode, and operands.
5951 ///
5952 /// Note that MorphNodeTo returns the resultant node.  If there is already a
5953 /// node of the specified opcode and operands, it returns that node instead of
5954 /// the current one.  Note that the SDLoc need not be the same.
5955 ///
5956 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5957 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5958 /// node, and because it doesn't require CSE recalculation for any of
5959 /// the node's users.
5960 ///
5961 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
5962 /// As a consequence it isn't appropriate to use from within the DAG combiner or
5963 /// the legalizer which maintain worklists that would need to be updated when
5964 /// deleting things.
MorphNodeTo(SDNode * N,unsigned Opc,SDVTList VTs,ArrayRef<SDValue> Ops)5965 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5966                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
5967   unsigned NumOps = Ops.size();
5968   // If an identical node already exists, use it.
5969   void *IP = nullptr;
5970   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5971     FoldingSetNodeID ID;
5972     AddNodeIDNode(ID, Opc, VTs, Ops);
5973     if (SDNode *ON = FindNodeOrInsertPos(ID, N->getDebugLoc(), IP))
5974       return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5975   }
5976 
5977   if (!RemoveNodeFromCSEMaps(N))
5978     IP = nullptr;
5979 
5980   // Start the morphing.
5981   N->NodeType = Opc;
5982   N->ValueList = VTs.VTs;
5983   N->NumValues = VTs.NumVTs;
5984 
5985   // Clear the operands list, updating used nodes to remove this from their
5986   // use list.  Keep track of any operands that become dead as a result.
5987   SmallPtrSet<SDNode*, 16> DeadNodeSet;
5988   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5989     SDUse &Use = *I++;
5990     SDNode *Used = Use.getNode();
5991     Use.set(SDValue());
5992     if (Used->use_empty())
5993       DeadNodeSet.insert(Used);
5994   }
5995 
5996   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5997     // Initialize the memory references information.
5998     MN->setMemRefs(nullptr, nullptr);
5999     // If NumOps is larger than the # of operands we can have in a
6000     // MachineSDNode, reallocate the operand list.
6001     if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
6002       if (MN->OperandsNeedDelete)
6003         delete[] MN->OperandList;
6004       if (NumOps > array_lengthof(MN->LocalOperands))
6005         // We're creating a final node that will live unmorphed for the
6006         // remainder of the current SelectionDAG iteration, so we can allocate
6007         // the operands directly out of a pool with no recycling metadata.
6008         MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
6009                          Ops.data(), NumOps);
6010       else
6011         MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
6012       MN->OperandsNeedDelete = false;
6013     } else
6014       MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
6015   } else {
6016     // If NumOps is larger than the # of operands we currently have, reallocate
6017     // the operand list.
6018     if (NumOps > N->NumOperands) {
6019       if (N->OperandsNeedDelete)
6020         delete[] N->OperandList;
6021       N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
6022       N->OperandsNeedDelete = true;
6023     } else
6024       N->InitOperands(N->OperandList, Ops.data(), NumOps);
6025   }
6026 
6027   // Delete any nodes that are still dead after adding the uses for the
6028   // new operands.
6029   if (!DeadNodeSet.empty()) {
6030     SmallVector<SDNode *, 16> DeadNodes;
6031     for (SDNode *N : DeadNodeSet)
6032       if (N->use_empty())
6033         DeadNodes.push_back(N);
6034     RemoveDeadNodes(DeadNodes);
6035   }
6036 
6037   if (IP)
6038     CSEMap.InsertNode(N, IP);   // Memoize the new node.
6039   return N;
6040 }
6041 
6042 
6043 /// getMachineNode - These are used for target selectors to create a new node
6044 /// with specified return type(s), MachineInstr opcode, and operands.
6045 ///
6046 /// Note that getMachineNode returns the resultant node.  If there is already a
6047 /// node of the specified opcode and operands, it returns that node instead of
6048 /// the current one.
6049 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT)6050 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
6051   SDVTList VTs = getVTList(VT);
6052   return getMachineNode(Opcode, dl, VTs, None);
6053 }
6054 
6055 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT,SDValue Op1)6056 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
6057   SDVTList VTs = getVTList(VT);
6058   SDValue Ops[] = { Op1 };
6059   return getMachineNode(Opcode, dl, VTs, Ops);
6060 }
6061 
6062 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT,SDValue Op1,SDValue Op2)6063 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
6064                              SDValue Op1, SDValue Op2) {
6065   SDVTList VTs = getVTList(VT);
6066   SDValue Ops[] = { Op1, Op2 };
6067   return getMachineNode(Opcode, dl, VTs, Ops);
6068 }
6069 
6070 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT,SDValue Op1,SDValue Op2,SDValue Op3)6071 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
6072                              SDValue Op1, SDValue Op2, SDValue Op3) {
6073   SDVTList VTs = getVTList(VT);
6074   SDValue Ops[] = { Op1, Op2, Op3 };
6075   return getMachineNode(Opcode, dl, VTs, Ops);
6076 }
6077 
6078 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT,ArrayRef<SDValue> Ops)6079 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
6080                              ArrayRef<SDValue> Ops) {
6081   SDVTList VTs = getVTList(VT);
6082   return getMachineNode(Opcode, dl, VTs, Ops);
6083 }
6084 
6085 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2)6086 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
6087   SDVTList VTs = getVTList(VT1, VT2);
6088   return getMachineNode(Opcode, dl, VTs, None);
6089 }
6090 
6091 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,SDValue Op1)6092 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6093                              EVT VT1, EVT VT2, SDValue Op1) {
6094   SDVTList VTs = getVTList(VT1, VT2);
6095   SDValue Ops[] = { Op1 };
6096   return getMachineNode(Opcode, dl, VTs, Ops);
6097 }
6098 
6099 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2)6100 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6101                              EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
6102   SDVTList VTs = getVTList(VT1, VT2);
6103   SDValue Ops[] = { Op1, Op2 };
6104   return getMachineNode(Opcode, dl, VTs, Ops);
6105 }
6106 
6107 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2,SDValue Op3)6108 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6109                              EVT VT1, EVT VT2, SDValue Op1,
6110                              SDValue Op2, SDValue Op3) {
6111   SDVTList VTs = getVTList(VT1, VT2);
6112   SDValue Ops[] = { Op1, Op2, Op3 };
6113   return getMachineNode(Opcode, dl, VTs, Ops);
6114 }
6115 
6116 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,ArrayRef<SDValue> Ops)6117 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6118                              EVT VT1, EVT VT2,
6119                              ArrayRef<SDValue> Ops) {
6120   SDVTList VTs = getVTList(VT1, VT2);
6121   return getMachineNode(Opcode, dl, VTs, Ops);
6122 }
6123 
6124 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2)6125 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6126                              EVT VT1, EVT VT2, EVT VT3,
6127                              SDValue Op1, SDValue Op2) {
6128   SDVTList VTs = getVTList(VT1, VT2, VT3);
6129   SDValue Ops[] = { Op1, Op2 };
6130   return getMachineNode(Opcode, dl, VTs, Ops);
6131 }
6132 
6133 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2,SDValue Op3)6134 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6135                              EVT VT1, EVT VT2, EVT VT3,
6136                              SDValue Op1, SDValue Op2, SDValue Op3) {
6137   SDVTList VTs = getVTList(VT1, VT2, VT3);
6138   SDValue Ops[] = { Op1, Op2, Op3 };
6139   return getMachineNode(Opcode, dl, VTs, Ops);
6140 }
6141 
6142 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,EVT VT3,ArrayRef<SDValue> Ops)6143 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6144                              EVT VT1, EVT VT2, EVT VT3,
6145                              ArrayRef<SDValue> Ops) {
6146   SDVTList VTs = getVTList(VT1, VT2, VT3);
6147   return getMachineNode(Opcode, dl, VTs, Ops);
6148 }
6149 
6150 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,EVT VT1,EVT VT2,EVT VT3,EVT VT4,ArrayRef<SDValue> Ops)6151 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
6152                              EVT VT2, EVT VT3, EVT VT4,
6153                              ArrayRef<SDValue> Ops) {
6154   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
6155   return getMachineNode(Opcode, dl, VTs, Ops);
6156 }
6157 
6158 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc dl,ArrayRef<EVT> ResultTys,ArrayRef<SDValue> Ops)6159 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
6160                              ArrayRef<EVT> ResultTys,
6161                              ArrayRef<SDValue> Ops) {
6162   SDVTList VTs = getVTList(ResultTys);
6163   return getMachineNode(Opcode, dl, VTs, Ops);
6164 }
6165 
6166 MachineSDNode *
getMachineNode(unsigned Opcode,SDLoc DL,SDVTList VTs,ArrayRef<SDValue> OpsArray)6167 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
6168                              ArrayRef<SDValue> OpsArray) {
6169   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
6170   MachineSDNode *N;
6171   void *IP = nullptr;
6172   const SDValue *Ops = OpsArray.data();
6173   unsigned NumOps = OpsArray.size();
6174 
6175   if (DoCSE) {
6176     FoldingSetNodeID ID;
6177     AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
6178     IP = nullptr;
6179     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) {
6180       return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
6181     }
6182   }
6183 
6184   // Allocate a new MachineSDNode.
6185   N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
6186                                         DL.getDebugLoc(), VTs);
6187 
6188   // Initialize the operands list.
6189   if (NumOps > array_lengthof(N->LocalOperands))
6190     // We're creating a final node that will live unmorphed for the
6191     // remainder of the current SelectionDAG iteration, so we can allocate
6192     // the operands directly out of a pool with no recycling metadata.
6193     N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
6194                     Ops, NumOps);
6195   else
6196     N->InitOperands(N->LocalOperands, Ops, NumOps);
6197   N->OperandsNeedDelete = false;
6198 
6199   if (DoCSE)
6200     CSEMap.InsertNode(N, IP);
6201 
6202   InsertNode(N);
6203   return N;
6204 }
6205 
6206 /// getTargetExtractSubreg - A convenience function for creating
6207 /// TargetOpcode::EXTRACT_SUBREG nodes.
6208 SDValue
getTargetExtractSubreg(int SRIdx,SDLoc DL,EVT VT,SDValue Operand)6209 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
6210                                      SDValue Operand) {
6211   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6212   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
6213                                   VT, Operand, SRIdxVal);
6214   return SDValue(Subreg, 0);
6215 }
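// Example (illustrative; the sub-register index 'MyTarget::sub_lo' is
// hypothetical, and 'DAG', 'dl' and an i64 value 'Val' are assumed): extract
// the low 32-bit half of a 64-bit register value.
//
//   SDValue Lo = DAG.getTargetExtractSubreg(MyTarget::sub_lo, dl, MVT::i32,
//                                           Val);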
6216 
6217 /// getTargetInsertSubreg - A convenience function for creating
6218 /// TargetOpcode::INSERT_SUBREG nodes.
6219 SDValue
getTargetInsertSubreg(int SRIdx,SDLoc DL,EVT VT,SDValue Operand,SDValue Subreg)6220 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
6221                                     SDValue Operand, SDValue Subreg) {
6222   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6223   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
6224                                   VT, Operand, Subreg, SRIdxVal);
6225   return SDValue(Result, 0);
6226 }
6227 
6228 /// getNodeIfExists - Get the specified node if it's already available, or
6229 /// else return NULL.
getNodeIfExists(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops,const SDNodeFlags * Flags)6230 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
6231                                       ArrayRef<SDValue> Ops,
6232                                       const SDNodeFlags *Flags) {
6233   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
6234     FoldingSetNodeID ID;
6235     AddNodeIDNode(ID, Opcode, VTList, Ops);
6236     AddNodeIDFlags(ID, Opcode, Flags);
6237     void *IP = nullptr;
6238     if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP))
6239       return E;
6240   }
6241   return nullptr;
6242 }
6243 
6244 /// getDbgValue - Creates an SDDbgValue node.
6245 ///
6246 /// SDNode
getDbgValue(MDNode * Var,MDNode * Expr,SDNode * N,unsigned R,bool IsIndirect,uint64_t Off,DebugLoc DL,unsigned O)6247 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
6248                                       unsigned R, bool IsIndirect, uint64_t Off,
6249                                       DebugLoc DL, unsigned O) {
6250   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6251          "Expected inlined-at fields to agree");
6252   return new (DbgInfo->getAlloc())
6253       SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
6254 }
6255 
6256 /// Constant
getConstantDbgValue(MDNode * Var,MDNode * Expr,const Value * C,uint64_t Off,DebugLoc DL,unsigned O)6257 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
6258                                               const Value *C, uint64_t Off,
6259                                               DebugLoc DL, unsigned O) {
6260   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6261          "Expected inlined-at fields to agree");
6262   return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
6263 }
6264 
6265 /// FrameIndex
getFrameIndexDbgValue(MDNode * Var,MDNode * Expr,unsigned FI,uint64_t Off,DebugLoc DL,unsigned O)6266 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
6267                                                 unsigned FI, uint64_t Off,
6268                                                 DebugLoc DL, unsigned O) {
6269   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6270          "Expected inlined-at fields to agree");
6271   return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
6272 }
6273 
6274 namespace {
6275 
6276 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
6277 /// pointed to by a use iterator is deleted, increment the use iterator
6278 /// so that it doesn't dangle.
6279 ///
6280 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
6281   SDNode::use_iterator &UI;
6282   SDNode::use_iterator &UE;
6283 
6284   void NodeDeleted(SDNode *N, SDNode *E) override {
6285     // Increment the iterator as needed.
6286     while (UI != UE && N == *UI)
6287       ++UI;
6288   }
6289 
6290 public:
6291   RAUWUpdateListener(SelectionDAG &d,
6292                      SDNode::use_iterator &ui,
6293                      SDNode::use_iterator &ue)
6294     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
6295 };
6296 
6297 }
6298 
6299 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6300 /// This can cause recursive merging of nodes in the DAG.
6301 ///
6302 /// This version assumes From has a single result value.
6303 ///
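/// A typical caller is a combine that has built a simplified value and wants
/// every user of the old single-result node rewired to it, e.g. (a sketch;
/// N, X and Mask are placeholders):
///
///   SDValue Simplified = DAG.getNode(ISD::AND, DL, VT, X, Mask);
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), Simplified);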
6304 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
6305   SDNode *From = FromN.getNode();
6306   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
6307          "Cannot replace with this method!");
6308   assert(From != To.getNode() && "Cannot replace uses of with self");
6309 
6310   // Iterate over all the existing uses of From. New uses will be added
6311   // to the beginning of the use list, which we avoid visiting.
6312   // This specifically avoids visiting uses of From that arise while the
6313   // replacement is happening, because any such uses would be the result
6314   // of CSE: If an existing node looks like From after one of its operands
6315   // is replaced by To, we don't want to replace all of its uses with To
6316   // as well. See PR3018 for more info.
6317   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6318   RAUWUpdateListener Listener(*this, UI, UE);
6319   while (UI != UE) {
6320     SDNode *User = *UI;
6321 
6322     // This node is about to morph, remove its old self from the CSE maps.
6323     RemoveNodeFromCSEMaps(User);
6324 
6325     // A user can appear in a use list multiple times, and when this
6326     // happens the uses are usually next to each other in the list.
6327     // To help reduce the number of CSE recomputations, process all
6328     // the uses of this user that we can find this way.
6329     do {
6330       SDUse &Use = UI.getUse();
6331       ++UI;
6332       Use.set(To);
6333     } while (UI != UE && *UI == User);
6334 
6335     // Now that we have modified User, add it back to the CSE maps.  If it
6336     // already exists there, recursively merge the results together.
6337     AddModifiedNodeToCSEMaps(User);
6338   }
6339 
6340   // If we just RAUW'd the root, take note.
6341   if (FromN == getRoot())
6342     setRoot(To);
6343 }
6344 
6345 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6346 /// This can cause recursive merging of nodes in the DAG.
6347 ///
6348 /// This version assumes that for each value of From, there is a
6349 /// corresponding value in To in the same position with the same type.
6350 ///
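/// This is the variant to use when an entire multi-result node is swapped
/// for another node with an identical list of result types, for instance
/// replacing one load with another that produces the same value and chain
/// types (a sketch):
///
///   DAG.ReplaceAllUsesWith(OldLoad.getNode(), NewLoad.getNode());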
6351 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
6352 #ifndef NDEBUG
6353   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6354     assert((!From->hasAnyUseOfValue(i) ||
6355             From->getValueType(i) == To->getValueType(i)) &&
6356            "Cannot use this version of ReplaceAllUsesWith!");
6357 #endif
6358 
6359   // Handle the trivial case.
6360   if (From == To)
6361     return;
6362 
6363   // Iterate over just the existing users of From. See the comments in
6364   // the ReplaceAllUsesWith above.
6365   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6366   RAUWUpdateListener Listener(*this, UI, UE);
6367   while (UI != UE) {
6368     SDNode *User = *UI;
6369 
6370     // This node is about to morph, remove its old self from the CSE maps.
6371     RemoveNodeFromCSEMaps(User);
6372 
6373     // A user can appear in a use list multiple times, and when this
6374     // happens the uses are usually next to each other in the list.
6375     // To help reduce the number of CSE recomputations, process all
6376     // the uses of this user that we can find this way.
6377     do {
6378       SDUse &Use = UI.getUse();
6379       ++UI;
6380       Use.setNode(To);
6381     } while (UI != UE && *UI == User);
6382 
6383     // Now that we have modified User, add it back to the CSE maps.  If it
6384     // already exists there, recursively merge the results together.
6385     AddModifiedNodeToCSEMaps(User);
6386   }
6387 
6388   // If we just RAUW'd the root, take note.
6389   if (From == getRoot().getNode())
6390     setRoot(SDValue(To, getRoot().getResNo()));
6391 }
6392 
6393 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6394 /// This can cause recursive merging of nodes in the DAG.
6395 ///
6396 /// This version can replace From with any result values.  To must match the
6397 /// number and types of values returned by From.
6398 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
6399   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
6400     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
6401 
6402   // Iterate over just the existing users of From. See the comments in
6403   // the ReplaceAllUsesWith above.
6404   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6405   RAUWUpdateListener Listener(*this, UI, UE);
6406   while (UI != UE) {
6407     SDNode *User = *UI;
6408 
6409     // This node is about to morph, remove its old self from the CSE maps.
6410     RemoveNodeFromCSEMaps(User);
6411 
6412     // A user can appear in a use list multiple times, and when this
6413     // happens the uses are usually next to each other in the list.
6414     // To help reduce the number of CSE recomputations, process all
6415     // the uses of this user that we can find this way.
6416     do {
6417       SDUse &Use = UI.getUse();
6418       const SDValue &ToOp = To[Use.getResNo()];
6419       ++UI;
6420       Use.set(ToOp);
6421     } while (UI != UE && *UI == User);
6422 
6423     // Now that we have modified User, add it back to the CSE maps.  If it
6424     // already exists there, recursively merge the results together.
6425     AddModifiedNodeToCSEMaps(User);
6426   }
6427 
6428   // If we just RAUW'd the root, take note.
6429   if (From == getRoot().getNode())
6430     setRoot(SDValue(To[getRoot().getResNo()]));
6431 }
6432 
6433 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
6434 /// uses of other values produced by From.getNode() alone.
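/// This is useful when only one result needs rewiring, e.g. replacing just
/// the chain output of a load while leaving its value output untouched
/// (a sketch; Ld and NewChain are placeholders):
///
///   DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);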
6436 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
6437   // Handle the really simple, really trivial case efficiently.
6438   if (From == To) return;
6439 
6440   // Handle the simple, trivial, case efficiently.
6441   if (From.getNode()->getNumValues() == 1) {
6442     ReplaceAllUsesWith(From, To);
6443     return;
6444   }
6445 
6446   // Iterate over just the existing users of From. See the comments in
6447   // the ReplaceAllUsesWith above.
6448   SDNode::use_iterator UI = From.getNode()->use_begin(),
6449                        UE = From.getNode()->use_end();
6450   RAUWUpdateListener Listener(*this, UI, UE);
6451   while (UI != UE) {
6452     SDNode *User = *UI;
6453     bool UserRemovedFromCSEMaps = false;
6454 
6455     // A user can appear in a use list multiple times, and when this
6456     // happens the uses are usually next to each other in the list.
6457     // To help reduce the number of CSE recomputations, process all
6458     // the uses of this user that we can find this way.
6459     do {
6460       SDUse &Use = UI.getUse();
6461 
6462       // Skip uses of different values from the same node.
6463       if (Use.getResNo() != From.getResNo()) {
6464         ++UI;
6465         continue;
6466       }
6467 
6468       // If this node hasn't been modified yet, it's still in the CSE maps,
6469       // so remove its old self from the CSE maps.
6470       if (!UserRemovedFromCSEMaps) {
6471         RemoveNodeFromCSEMaps(User);
6472         UserRemovedFromCSEMaps = true;
6473       }
6474 
6475       ++UI;
6476       Use.set(To);
6477     } while (UI != UE && *UI == User);
6478 
6479     // We are iterating over all uses of the From node, so if a use
6480     // doesn't use the specific value, no changes are made.
6481     if (!UserRemovedFromCSEMaps)
6482       continue;
6483 
6484     // Now that we have modified User, add it back to the CSE maps.  If it
6485     // already exists there, recursively merge the results together.
6486     AddModifiedNodeToCSEMaps(User);
6487   }
6488 
6489   // If we just RAUW'd the root, take note.
6490   if (From == getRoot())
6491     setRoot(To);
6492 }
6493 
6494 namespace {
6495   /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
6496   /// to record information about a use.
6497   struct UseMemo {
6498     SDNode *User;
6499     unsigned Index;
6500     SDUse *Use;
6501   };
6502 
6503   /// operator< - Sort Memos by User.
6504   bool operator<(const UseMemo &L, const UseMemo &R) {
6505     return (intptr_t)L.User < (intptr_t)R.User;
6506   }
6507 }
6508 
6509 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6510 /// uses of other values produced by From.getNode() alone.  The same value
6511 /// may appear in both the From and To list.
6513 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
6514                                               const SDValue *To,
6515                                               unsigned Num){
6516   // Handle the simple, trivial case efficiently.
6517   if (Num == 1)
6518     return ReplaceAllUsesOfValueWith(*From, *To);
6519 
6520   // Gather all the existing uses and make a record of each. This helps
6521   // when processing new uses that are introduced during the
6522   // replacement process.
6523   SmallVector<UseMemo, 4> Uses;
6524   for (unsigned i = 0; i != Num; ++i) {
6525     unsigned FromResNo = From[i].getResNo();
6526     SDNode *FromNode = From[i].getNode();
6527     for (SDNode::use_iterator UI = FromNode->use_begin(),
6528          E = FromNode->use_end(); UI != E; ++UI) {
6529       SDUse &Use = UI.getUse();
6530       if (Use.getResNo() == FromResNo) {
6531         UseMemo Memo = { *UI, i, &Use };
6532         Uses.push_back(Memo);
6533       }
6534     }
6535   }
6536 
6537   // Sort the uses, so that all the uses from a given User are together.
6538   std::sort(Uses.begin(), Uses.end());
6539 
6540   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6541        UseIndex != UseIndexEnd; ) {
6542     // We know that this user uses some value of From.  If it is the right
6543     // value, update it.
6544     SDNode *User = Uses[UseIndex].User;
6545 
6546     // This node is about to morph, remove its old self from the CSE maps.
6547     RemoveNodeFromCSEMaps(User);
6548 
6549     // The Uses array is sorted, so all the uses for a given User
6550     // are next to each other in the list.
6551     // To help reduce the number of CSE recomputations, process all
6552     // the uses of this user that we can find this way.
6553     do {
6554       unsigned i = Uses[UseIndex].Index;
6555       SDUse &Use = *Uses[UseIndex].Use;
6556       ++UseIndex;
6557 
6558       Use.set(To[i]);
6559     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6560 
6561     // Now that we have modified User, add it back to the CSE maps.  If it
6562     // already exists there, recursively merge the results together.
6563     AddModifiedNodeToCSEMaps(User);
6564   }
6565 }
6566 
6567 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6568 /// based on their topological order. It returns the number of nodes, and
6569 /// the node list itself is reordered to match the topological order.
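///
/// After this runs, a forward walk over the node list visits every node
/// before its users, so a pass can do, for example:
///
///   DAG.AssignTopologicalOrder();
///   for (SDNode &N : DAG.allnodes()) {
///     // All operands of N have already been visited here.
///   }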
6570 unsigned SelectionDAG::AssignTopologicalOrder() {
6571 
6572   unsigned DAGSize = 0;
6573 
6574   // SortedPos tracks the progress of the algorithm. Nodes before it are
6575   // sorted, nodes after it are unsorted. When the algorithm completes
6576   // it is at the end of the list.
6577   allnodes_iterator SortedPos = allnodes_begin();
6578 
6579   // Visit all the nodes. Move nodes with no operands to the front of
6580   // the list immediately. Annotate nodes that do have operands with their
6581   // operand count. Before we do this, the Node Id fields of the nodes
6582   // may contain arbitrary values. After, the Node Id fields for nodes
6583   // before SortedPos will contain the topological sort index, and the
6584   // Node Id fields for nodes at SortedPos and after will contain the
6585   // count of outstanding operands.
6586   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6587     SDNode *N = &*I++;
6588     checkForCycles(N, this);
6589     unsigned Degree = N->getNumOperands();
6590     if (Degree == 0) {
6591       // A node with no operands; add it to the result array immediately.
6592       N->setNodeId(DAGSize++);
6593       allnodes_iterator Q(N);
6594       if (Q != SortedPos)
6595         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6596       assert(SortedPos != AllNodes.end() && "Overran node list");
6597       ++SortedPos;
6598     } else {
6599       // Temporarily use the Node Id as scratch space for the degree count.
6600       N->setNodeId(Degree);
6601     }
6602   }
6603 
6604   // Visit all the nodes. As we iterate, move nodes into sorted order,
6605   // such that by the time the end is reached all nodes will be sorted.
6606   for (SDNode &Node : allnodes()) {
6607     SDNode *N = &Node;
6608     checkForCycles(N, this);
6609     // N is in sorted position, so all its uses have one less operand
6610     // that needs to be sorted.
6611     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6612          UI != UE; ++UI) {
6613       SDNode *P = *UI;
6614       unsigned Degree = P->getNodeId();
6615       assert(Degree != 0 && "Invalid node degree");
6616       --Degree;
6617       if (Degree == 0) {
6618         // All of P's operands are sorted, so P may be sorted now.
6619         P->setNodeId(DAGSize++);
6620         if (P != SortedPos)
6621           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6622         assert(SortedPos != AllNodes.end() && "Overran node list");
6623         ++SortedPos;
6624       } else {
6625         // Update P's outstanding operand count.
6626         P->setNodeId(Degree);
6627       }
6628     }
6629     if (&Node == SortedPos) {
6630 #ifndef NDEBUG
6631       allnodes_iterator I(N);
6632       SDNode *S = &*++I;
6633       dbgs() << "Overran sorted position:\n";
6634       S->dumprFull(this); dbgs() << "\n";
6635       dbgs() << "Checking if this is due to cycles\n";
6636       checkForCycles(this, true);
6637 #endif
6638       llvm_unreachable(nullptr);
6639     }
6640   }
6641 
6642   assert(SortedPos == AllNodes.end() &&
6643          "Topological sort incomplete!");
6644   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6645          "First node in topological sort is not the entry token!");
6646   assert(AllNodes.front().getNodeId() == 0 &&
6647          "First node in topological sort has non-zero id!");
6648   assert(AllNodes.front().getNumOperands() == 0 &&
6649          "First node in topological sort has operands!");
6650   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6651          "Last node in topologic sort has unexpected id!");
6652   assert(AllNodes.back().use_empty() &&
6653          "Last node in topologic sort has users!");
6654   assert(DAGSize == allnodes_size() && "Node count mismatch!");
6655   return DAGSize;
6656 }
6657 
6658 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6659 /// value is produced by SD.
6660 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6661   if (SD) {
6662     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
6663     SD->setHasDebugValue(true);
6664   }
6665   DbgInfo->add(DB, SD, isParameter);
6666 }
6667 
6668 /// TransferDbgValues - Transfer SDDbgValues.
6669 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6670   if (From == To || !From.getNode()->getHasDebugValue())
6671     return;
6672   SDNode *FromNode = From.getNode();
6673   SDNode *ToNode = To.getNode();
6674   ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6675   SmallVector<SDDbgValue *, 2> ClonedDVs;
6676   for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6677        I != E; ++I) {
6678     SDDbgValue *Dbg = *I;
6679     if (Dbg->getKind() == SDDbgValue::SDNODE) {
6680       SDDbgValue *Clone =
6681           getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
6682                       To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
6683                       Dbg->getDebugLoc(), Dbg->getOrder());
6684       ClonedDVs.push_back(Clone);
6685     }
6686   }
6687   for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6688          E = ClonedDVs.end(); I != E; ++I)
6689     AddDbgValue(*I, ToNode, false);
6690 }
6691 
6692 //===----------------------------------------------------------------------===//
6693 //                              SDNode Class
6694 //===----------------------------------------------------------------------===//
6695 
6696 bool llvm::isNullConstant(SDValue V) {
6697   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6698   return Const != nullptr && Const->isNullValue();
6699 }
6700 
6701 bool llvm::isNullFPConstant(SDValue V) {
6702   ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
6703   return Const != nullptr && Const->isZero() && !Const->isNegative();
6704 }
6705 
6706 bool llvm::isAllOnesConstant(SDValue V) {
6707   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6708   return Const != nullptr && Const->isAllOnesValue();
6709 }
6710 
6711 bool llvm::isOneConstant(SDValue V) {
6712   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6713   return Const != nullptr && Const->isOne();
6714 }
6715 
6716 HandleSDNode::~HandleSDNode() {
6717   DropOperands();
6718 }
6719 
6720 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6721                                          DebugLoc DL, const GlobalValue *GA,
6722                                          EVT VT, int64_t o, unsigned char TF)
6723   : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6724   TheGlobal = GA;
6725 }
6726 
6727 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6728                                          SDValue X, unsigned SrcAS,
6729                                          unsigned DestAS)
6730  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6731    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6732 
6733 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6734                      EVT memvt, MachineMemOperand *mmo)
6735  : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6736   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6737                                       MMO->isNonTemporal(), MMO->isInvariant());
6738   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6739   assert(isNonTemporal() == MMO->isNonTemporal() &&
6740          "Non-temporal encoding error!");
6741   // We check here that the size of the memory operand fits within the size of
6742   // the MMO. This is because the MMO might indicate only a possible address
6743   // range instead of specifying the affected memory addresses precisely.
6744   assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
6745 }
6746 
6747 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6748                      ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
6749    : SDNode(Opc, Order, dl, VTs, Ops),
6750      MemoryVT(memvt), MMO(mmo) {
6751   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6752                                       MMO->isNonTemporal(), MMO->isInvariant());
6753   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6754   assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
6755 }
6756 
6757 /// Profile - Gather unique data for the node.
6758 ///
6759 void SDNode::Profile(FoldingSetNodeID &ID) const {
6760   AddNodeIDNode(ID, this);
6761 }
6762 
6763 namespace {
6764   struct EVTArray {
6765     std::vector<EVT> VTs;
6766 
6767     EVTArray() {
6768       VTs.reserve(MVT::LAST_VALUETYPE);
6769       for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6770         VTs.push_back(MVT((MVT::SimpleValueType)i));
6771     }
6772   };
6773 }
6774 
6775 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6776 static ManagedStatic<EVTArray> SimpleVTArray;
6777 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6778 
6779 /// getValueTypeList - Return a pointer to the specified value type.
6780 ///
6781 const EVT *SDNode::getValueTypeList(EVT VT) {
6782   if (VT.isExtended()) {
6783     sys::SmartScopedLock<true> Lock(*VTMutex);
6784     return &(*EVTs->insert(VT).first);
6785   } else {
6786     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6787            "Value type out of range!");
6788     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6789   }
6790 }
6791 
6792 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6793 /// indicated value.  This method ignores uses of other values defined by this
6794 /// operation.
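///
/// For example, N->hasNUsesOfValue(1, 0) asks whether result 0 of N has
/// exactly one user, a common check before folding N into that user.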
6795 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6796   assert(Value < getNumValues() && "Bad value!");
6797 
6798   // TODO: Only iterate over uses of a given value of the node
6799   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6800     if (UI.getUse().getResNo() == Value) {
6801       if (NUses == 0)
6802         return false;
6803       --NUses;
6804     }
6805   }
6806 
6807   // Found exactly the right number of uses?
6808   return NUses == 0;
6809 }
6810 
6811 
6812 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
6813 /// value. This method ignores uses of other values defined by this operation.
6814 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6815   assert(Value < getNumValues() && "Bad value!");
6816 
6817   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6818     if (UI.getUse().getResNo() == Value)
6819       return true;
6820 
6821   return false;
6822 }
6823 
6824 
6825 /// isOnlyUserOf - Return true if this node is the only use of N.
6826 ///
6827 bool SDNode::isOnlyUserOf(const SDNode *N) const {
6828   bool Seen = false;
6829   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6830     SDNode *User = *I;
6831     if (User == this)
6832       Seen = true;
6833     else
6834       return false;
6835   }
6836 
6837   return Seen;
6838 }
6839 
6840 /// isOperandOf - Return true if this is an operand of N.
6841 ///
6842 bool SDValue::isOperandOf(const SDNode *N) const {
6843   for (const SDValue &Op : N->op_values())
6844     if (*this == Op)
6845       return true;
6846   return false;
6847 }
6848 
6849 bool SDNode::isOperandOf(const SDNode *N) const {
6850   for (const SDValue &Op : N->op_values())
6851     if (this == Op.getNode())
6852       return true;
6853   return false;
6854 }
6855 
6856 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6857 /// be a chain) reaches the specified operand without crossing any
6858 /// side-effecting instructions on any chain path.  In practice, this looks
6859 /// through token factors and non-volatile loads.  In order to remain efficient,
6860 /// this only looks a couple of nodes in; it does not do an exhaustive search.
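///
/// For example, a combine might check that a store's chain operand reaches a
/// load's chain result with no intervening side effects before folding the
/// two together (a sketch; St and Ld are placeholders):
///
///   if (St->getChain().reachesChainWithoutSideEffects(SDValue(Ld, 1)))
///     // ... the fold is safe as far as the chain is concerned.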
6861 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6862                                                unsigned Depth) const {
6863   if (*this == Dest) return true;
6864 
6865   // Don't search too deeply, we just want to be able to see through
6866   // TokenFactor's etc.
6867   if (Depth == 0) return false;
6868 
6869   // If this is a token factor, all inputs to the TF happen in parallel.  If any
6870   // of the operands of the TF does not reach dest, then we cannot do the xform.
6871   if (getOpcode() == ISD::TokenFactor) {
6872     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6873       if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6874         return false;
6875     return true;
6876   }
6877 
6878   // Non-volatile loads don't have side effects, so look through them.
6879   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6880     if (!Ld->isVolatile())
6881       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6882   }
6883   return false;
6884 }
6885 
6886 /// hasPredecessor - Return true if N is a predecessor of this node.
6887 /// N is either an operand of this node, or can be reached by recursively
6888 /// traversing up the operands.
6889 /// NOTE: This is an expensive method. Use it carefully.
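///
/// Combines typically use this to avoid introducing a cycle: making X an
/// operand of Y is only safe if X does not already depend on Y, e.g.
/// (a sketch):
///
///   if (X->hasPredecessor(Y))
///     return SDValue(); // Folding would create a cycle; bail out.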
6890 bool SDNode::hasPredecessor(const SDNode *N) const {
6891   SmallPtrSet<const SDNode *, 32> Visited;
6892   SmallVector<const SDNode *, 16> Worklist;
6893   return hasPredecessorHelper(N, Visited, Worklist);
6894 }
6895 
6896 bool
6897 SDNode::hasPredecessorHelper(const SDNode *N,
6898                              SmallPtrSetImpl<const SDNode *> &Visited,
6899                              SmallVectorImpl<const SDNode *> &Worklist) const {
6900   if (Visited.empty()) {
6901     Worklist.push_back(this);
6902   } else {
6903     // Take a look in the visited set. If we've already encountered this node
6904     // we needn't search further.
6905     if (Visited.count(N))
6906       return true;
6907   }
6908 
6909   // Haven't visited N yet. Continue the search.
6910   while (!Worklist.empty()) {
6911     const SDNode *M = Worklist.pop_back_val();
6912     for (const SDValue &OpV : M->op_values()) {
6913       SDNode *Op = OpV.getNode();
6914       if (Visited.insert(Op).second)
6915         Worklist.push_back(Op);
6916       if (Op == N)
6917         return true;
6918     }
6919   }
6920 
6921   return false;
6922 }
6923 
6924 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6925   assert(Num < NumOperands && "Invalid child # of SDNode!");
6926   return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6927 }
6928 
6929 const SDNodeFlags *SDNode::getFlags() const {
6930   if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
6931     return &FlagsNode->Flags;
6932   return nullptr;
6933 }
6934 
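/// UnrollVectorOp - Scalarize a single-result vector operation: every element
/// is computed with the corresponding scalar operation on extracted elements,
/// and the results are reassembled with a BUILD_VECTOR.  If ResNE is zero the
/// whole vector is unrolled; otherwise the result has ResNE elements, padded
/// with UNDEF when ResNE exceeds the source element count.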
6935 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6936   assert(N->getNumValues() == 1 &&
6937          "Can't unroll a vector with multiple results!");
6938 
6939   EVT VT = N->getValueType(0);
6940   unsigned NE = VT.getVectorNumElements();
6941   EVT EltVT = VT.getVectorElementType();
6942   SDLoc dl(N);
6943 
6944   SmallVector<SDValue, 8> Scalars;
6945   SmallVector<SDValue, 4> Operands(N->getNumOperands());
6946 
6947   // If ResNE is 0, fully unroll the vector op.
6948   if (ResNE == 0)
6949     ResNE = NE;
6950   else if (NE > ResNE)
6951     NE = ResNE;
6952 
6953   unsigned i;
6954   for (i = 0; i != NE; ++i) {
6955     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6956       SDValue Operand = N->getOperand(j);
6957       EVT OperandVT = Operand.getValueType();
6958       if (OperandVT.isVector()) {
6959         // A vector operand; extract a single element.
6960         EVT OperandEltVT = OperandVT.getVectorElementType();
6961         Operands[j] =
6962             getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
6963                     getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
6964       } else {
6965         // A scalar operand; just use it as is.
6966         Operands[j] = Operand;
6967       }
6968     }
6969 
6970     switch (N->getOpcode()) {
6971     default: {
6972       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
6973                                 N->getFlags()));
6974       break;
6975     }
6976     case ISD::VSELECT:
6977       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
6978       break;
6979     case ISD::SHL:
6980     case ISD::SRA:
6981     case ISD::SRL:
6982     case ISD::ROTL:
6983     case ISD::ROTR:
6984       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6985                                getShiftAmountOperand(Operands[0].getValueType(),
6986                                                      Operands[1])));
6987       break;
6988     case ISD::SIGN_EXTEND_INREG:
6989     case ISD::FP_ROUND_INREG: {
6990       EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6991       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6992                                 Operands[0],
6993                                 getValueType(ExtVT)));
6994     }
6995     }
6996   }
6997 
6998   for (; i < ResNE; ++i)
6999     Scalars.push_back(getUNDEF(EltVT));
7000 
7001   return getNode(ISD::BUILD_VECTOR, dl,
7002                  EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
7003 }
7004 
7005 
7006 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
7007 /// location that is 'Dist' units away from the location that the 'Base' load
7008 /// is loading from.
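///
/// For example, when merging two adjacent 4-byte loads into one 8-byte load,
/// a target might check (a sketch; HiLoad and LoLoad are placeholders):
///
///   if (DAG.isConsecutiveLoad(HiLoad, LoLoad, /*Bytes=*/4, /*Dist=*/1))
///     // ... HiLoad reads the 4 bytes immediately after LoLoad.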
7009 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
7010                                      unsigned Bytes, int Dist) const {
7011   if (LD->getChain() != Base->getChain())
7012     return false;
7013   EVT VT = LD->getValueType(0);
7014   if (VT.getSizeInBits() / 8 != Bytes)
7015     return false;
7016 
7017   SDValue Loc = LD->getOperand(1);
7018   SDValue BaseLoc = Base->getOperand(1);
7019   if (Loc.getOpcode() == ISD::FrameIndex) {
7020     if (BaseLoc.getOpcode() != ISD::FrameIndex)
7021       return false;
7022     const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
7023     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
7024     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
7025     int FS  = MFI->getObjectSize(FI);
7026     int BFS = MFI->getObjectSize(BFI);
7027     if (FS != BFS || FS != (int)Bytes) return false;
7028     return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
7029   }
7030 
7031   // Handle X + C.
7032   if (isBaseWithConstantOffset(Loc)) {
7033     int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
7034     if (Loc.getOperand(0) == BaseLoc) {
7035       // If the base location is a simple address with no offset itself, then
7036       // the second load's first add operand should be the base address.
7037       if (LocOffset == Dist * (int)Bytes)
7038         return true;
7039     } else if (isBaseWithConstantOffset(BaseLoc)) {
7040       // The base location itself has an offset, so subtract that value from the
7041       // second load's offset before comparing to distance * size.
7042       int64_t BOffset =
7043         cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
7044       if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
7045         if ((LocOffset - BOffset) == Dist * (int)Bytes)
7046           return true;
7047       }
7048     }
7049   }
7050   const GlobalValue *GV1 = nullptr;
7051   const GlobalValue *GV2 = nullptr;
7052   int64_t Offset1 = 0;
7053   int64_t Offset2 = 0;
7054   bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
7055   bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
7056   if (isGA1 && isGA2 && GV1 == GV2)
7057     return Offset1 == (Offset2 + Dist*Bytes);
7058   return false;
7059 }
7060 
7061 
7062 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
7063 /// it cannot be inferred.
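///
/// Lowering code for memcpy-like operations can use this to pick wider
/// accesses when the pointer is known to be sufficiently aligned, e.g.
/// (a sketch; DstPtr is a placeholder):
///
///   if (unsigned Align = DAG.InferPtrAlignment(DstPtr))
///     // ... Align is a known lower bound on the pointer's alignment.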
7064 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
7065   // If this is a GlobalAddress + cst, return the alignment.
7066   const GlobalValue *GV;
7067   int64_t GVOffset = 0;
7068   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
7069     unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
7070     APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
7071     llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
7072                            getDataLayout());
7073     unsigned AlignBits = KnownZero.countTrailingOnes();
7074     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
7075     if (Align)
7076       return MinAlign(Align, GVOffset);
7077   }
7078 
7079   // If this is a direct reference to a stack slot, use information about the
7080   // stack slot's alignment.
7081   int FrameIdx = 1 << 31;
7082   int64_t FrameOffset = 0;
7083   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
7084     FrameIdx = FI->getIndex();
7085   } else if (isBaseWithConstantOffset(Ptr) &&
7086              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7087     // Handle FI+Cst
7088     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7089     FrameOffset = Ptr.getConstantOperandVal(1);
7090   }
7091 
7092   if (FrameIdx != (1 << 31)) {
7093     const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
7094     unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
7095                                     FrameOffset);
7096     return FIInfoAlign;
7097   }
7098 
7099   return 0;
7100 }
7101 
7102 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
7103 /// which is split (or expanded) into two not necessarily identical pieces.
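///
/// For example, an even-width vector simply splits in half:
/// GetSplitDestVTs(MVT::v8i32) yields the pair (MVT::v4i32, MVT::v4i32),
/// while a scalar type is mapped through getTypeToTransformTo.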
7104 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
7105   // Currently all types are split in half.
7106   EVT LoVT, HiVT;
7107   if (!VT.isVector()) {
7108     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
7109   } else {
7110     unsigned NumElements = VT.getVectorNumElements();
7111     assert(!(NumElements & 1) && "Splitting vector, but not in half!");
7112     LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
7113                                    NumElements/2);
7114   }
7115   return std::make_pair(LoVT, HiVT);
7116 }
7117 
7118 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
7119 /// low/high part.
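/// A common pattern is to pair this with GetSplitDestVTs (a sketch; Vec and
/// DL are placeholders):
///
///   EVT LoVT, HiVT;
///   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Vec.getValueType());
///   SDValue Lo, Hi;
///   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL, LoVT, HiVT);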
7120 std::pair<SDValue, SDValue>
7121 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
7122                           const EVT &HiVT) {
7123   assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
7124          N.getValueType().getVectorNumElements() &&
7125          "More vector elements requested than available!");
7126   SDValue Lo, Hi;
7127   Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
7128                getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
7129   Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
7130                getConstant(LoVT.getVectorNumElements(), DL,
7131                            TLI->getVectorIdxTy(getDataLayout())));
7132   return std::make_pair(Lo, Hi);
7133 }
7134 
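/// ExtractVectorElements - Append Count EXTRACT_VECTOR_ELT values taken from
/// Op, starting at element index Start, to Args.  If Count is zero, the
/// vector's full element count is used in its place.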
7135 void SelectionDAG::ExtractVectorElements(SDValue Op,
7136                                          SmallVectorImpl<SDValue> &Args,
7137                                          unsigned Start, unsigned Count) {
7138   EVT VT = Op.getValueType();
7139   if (Count == 0)
7140     Count = VT.getVectorNumElements();
7141 
7142   EVT EltVT = VT.getVectorElementType();
7143   EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
7144   SDLoc SL(Op);
7145   for (unsigned i = Start, e = Start + Count; i != e; ++i) {
7146     Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7147                            Op, getConstant(i, SL, IdxTy)));
7148   }
7149 }
7150 
7151 // getAddressSpace - Return the address space this GlobalAddress belongs to.
7152 unsigned GlobalAddressSDNode::getAddressSpace() const {
7153   return getGlobal()->getType()->getAddressSpace();
7154 }
7155 
7156 
7157 Type *ConstantPoolSDNode::getType() const {
7158   if (isMachineConstantPoolEntry())
7159     return Val.MachineCPVal->getType();
7160   return Val.ConstVal->getType();
7161 }
7162 
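/// isConstantSplat - Check whether this build_vector, viewed as one
/// contiguous bit pattern, is a splat, and find the smallest element size
/// that still splats the vector.  For example, with the default MinSplatBits
/// a v4i32 vector whose elements are all 0x01010101 reports SplatBitSize == 8
/// with a SplatValue of 0x01.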
7163 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
7164                                         APInt &SplatUndef,
7165                                         unsigned &SplatBitSize,
7166                                         bool &HasAnyUndefs,
7167                                         unsigned MinSplatBits,
7168                                         bool isBigEndian) const {
7169   EVT VT = getValueType(0);
7170   assert(VT.isVector() && "Expected a vector type");
7171   unsigned sz = VT.getSizeInBits();
7172   if (MinSplatBits > sz)
7173     return false;
7174 
7175   SplatValue = APInt(sz, 0);
7176   SplatUndef = APInt(sz, 0);
7177 
7178   // Get the bits.  Bits with undefined values (when the corresponding element
7179   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
7180   // in SplatValue.  If any of the values are not constant, give up and return
7181   // false.
7182   unsigned int nOps = getNumOperands();
7183   assert(nOps > 0 && "isConstantSplat has 0-size build vector");
7184   unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
7185 
7186   for (unsigned j = 0; j < nOps; ++j) {
7187     unsigned i = isBigEndian ? nOps-1-j : j;
7188     SDValue OpVal = getOperand(i);
7189     unsigned BitPos = j * EltBitSize;
7190 
7191     if (OpVal.getOpcode() == ISD::UNDEF)
7192       SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
7193     else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
7194       SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
7195                     zextOrTrunc(sz) << BitPos;
7196     else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
7197       SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
7198     else
7199       return false;
7200   }
7201 
7202   // The build_vector is all constants or undefs.  Find the smallest element
7203   // size that splats the vector.
7204 
7205   HasAnyUndefs = (SplatUndef != 0);
7206   while (sz > 8) {
7207 
7208     unsigned HalfSize = sz / 2;
7209     APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
7210     APInt LowValue = SplatValue.trunc(HalfSize);
7211     APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
7212     APInt LowUndef = SplatUndef.trunc(HalfSize);
7213 
7214     // If the two halves do not match (ignoring undef bits), stop here.
7215     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
7216         MinSplatBits > HalfSize)
7217       break;
7218 
7219     SplatValue = HighValue | LowValue;
7220     SplatUndef = HighUndef & LowUndef;
7221 
7222     sz = HalfSize;
7223   }
7224 
7225   SplatBitSize = sz;
7226   return true;
7227 }
7228 
7229 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
7230   if (UndefElements) {
7231     UndefElements->clear();
7232     UndefElements->resize(getNumOperands());
7233   }
7234   SDValue Splatted;
7235   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
7236     SDValue Op = getOperand(i);
7237     if (Op.getOpcode() == ISD::UNDEF) {
7238       if (UndefElements)
7239         (*UndefElements)[i] = true;
7240     } else if (!Splatted) {
7241       Splatted = Op;
7242     } else if (Splatted != Op) {
7243       return SDValue();
7244     }
7245   }
7246 
7247   if (!Splatted) {
7248     assert(getOperand(0).getOpcode() == ISD::UNDEF &&
7249            "Can only have a splat without a constant for all undefs.");
7250     return getOperand(0);
7251   }
7252 
7253   return Splatted;
7254 }
7255 
7256 ConstantSDNode *
7257 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
7258   return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
7259 }
7260 
7261 ConstantFPSDNode *
7262 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
7263   return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
7264 }
7265 
7266 int32_t
7267 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
7268                                                    uint32_t BitWidth) const {
7269   if (ConstantFPSDNode *CN =
7270           dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
7271     bool IsExact;
7272     APSInt IntVal(BitWidth);
7273     APFloat APF = CN->getValueAPF();
7274     if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
7275             APFloat::opOK ||
7276         !IsExact)
7277       return -1;
7278 
7279     return IntVal.exactLogBase2();
7280   }
7281   return -1;
7282 }
7283 
7284 bool BuildVectorSDNode::isConstant() const {
7285   for (const SDValue &Op : op_values()) {
7286     unsigned Opc = Op.getOpcode();
7287     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
7288       return false;
7289   }
7290   return true;
7291 }
7292 
7293 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
7294   // Find the first non-undef value in the shuffle mask.
7295   unsigned i, e;
7296   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
7297     /* search */;
7298 
7299   assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
7300 
7301   // Make sure all remaining elements are either undef or the same as the first
7302   // non-undef value.
7303   for (int Idx = Mask[i]; i != e; ++i)
7304     if (Mask[i] >= 0 && Mask[i] != Idx)
7305       return false;
7306   return true;
7307 }
7308 
7309 #ifndef NDEBUG
7310 static void checkForCyclesHelper(const SDNode *N,
7311                                  SmallPtrSetImpl<const SDNode*> &Visited,
7312                                  SmallPtrSetImpl<const SDNode*> &Checked,
7313                                  const llvm::SelectionDAG *DAG) {
7314   // If this node has already been checked, don't check it again.
7315   if (Checked.count(N))
7316     return;
7317 
7318   // If a node has already been visited on this depth-first walk, reject it as
7319   // a cycle.
7320   if (!Visited.insert(N).second) {
7321     errs() << "Detected cycle in SelectionDAG\n";
7322     dbgs() << "Offending node:\n";
7323     N->dumprFull(DAG); dbgs() << "\n";
7324     abort();
7325   }
7326 
7327   for (const SDValue &Op : N->op_values())
7328     checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
7329 
7330   Checked.insert(N);
7331   Visited.erase(N);
7332 }
7333 #endif
7334 
7335 void llvm::checkForCycles(const llvm::SDNode *N,
7336                           const llvm::SelectionDAG *DAG,
7337                           bool force) {
7338 #ifndef NDEBUG
7339   bool check = force;
7340 #ifdef XDEBUG
7341   check = true;
7342 #endif  // XDEBUG
7343   if (check) {
7344     assert(N && "Checking nonexistent SDNode");
7345     SmallPtrSet<const SDNode*, 32> visited;
7346     SmallPtrSet<const SDNode*, 32> checked;
7347     checkForCyclesHelper(N, visited, checked, DAG);
7348   }
7349 #endif  // !NDEBUG
7350 }
7351 
7352 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
7353   checkForCycles(DAG->getRoot().getNode(), DAG, force);
7354 }
7355