1 //===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the SelectionDAG::Legalize method.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineJumpTableInfo.h"
23 #include "llvm/IR/CallingConv.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/DebugInfo.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetFrameLowering.h"
35 #include "llvm/Target/TargetLowering.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Target/TargetSubtargetInfo.h"
38 using namespace llvm;
39
40 #define DEBUG_TYPE "legalizedag"
41
42 //===----------------------------------------------------------------------===//
43 /// This takes an arbitrary SelectionDAG as input and
44 /// hacks on it until the target machine can handle it. This involves
45 /// eliminating value sizes the machine cannot handle (promoting small sizes to
46 /// large sizes or splitting up large values into small values) as well as
47 /// eliminating operations the machine cannot handle.
48 ///
49 /// This code also does a small amount of optimization and recognition of idioms
50 /// as part of its processing. For example, if a target does not support a
51 /// 'setcc' instruction efficiently, but does support the 'brcc' instruction,
52 /// this will attempt to merge setcc and brcond instructions into brcc's.
53 ///
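/// For example (illustrative only), a DAG containing
///   %c = setcc seteq, %a, %b
///   brcond %c, label %bb
/// may be rewritten by this pass as the single node
///   br_cc seteq, %a, %b, label %bb
/// when BR_CC is legal for the target but SETCC is not.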
54 namespace {
55 class SelectionDAGLegalize {
56 const TargetMachine &TM;
57 const TargetLowering &TLI;
58 SelectionDAG &DAG;
59
60 /// \brief The set of nodes which have already been legalized. We hold a
61 /// reference to it in order to update as necessary on node deletion.
62 SmallPtrSetImpl<SDNode *> &LegalizedNodes;
63
64 /// \brief A set of all the nodes updated during legalization.
65 SmallSetVector<SDNode *, 16> *UpdatedNodes;
66
67 EVT getSetCCResultType(EVT VT) const {
68 return TLI.getSetCCResultType(*DAG.getContext(), VT);
69 }
70
71 // Libcall insertion helpers.
72
73 public:
74 SelectionDAGLegalize(SelectionDAG &DAG,
75 SmallPtrSetImpl<SDNode *> &LegalizedNodes,
76 SmallSetVector<SDNode *, 16> *UpdatedNodes = nullptr)
77 : TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG),
78 LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {}
79
80 /// \brief Legalizes the given operation.
81 void LegalizeOp(SDNode *Node);
82
83 private:
84 SDValue OptimizeFloatStore(StoreSDNode *ST);
85
86 void LegalizeLoadOps(SDNode *Node);
87 void LegalizeStoreOps(SDNode *Node);
88
89 /// Some targets cannot handle a variable
90 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
91 /// is necessary to spill the vector being inserted into to memory, perform
92 /// the insert there, and then read the result back.
93 SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
94 SDValue Idx, SDLoc dl);
95 SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
96 SDValue Idx, SDLoc dl);
97
98 /// Return a vector shuffle operation which
99 /// performs the same shuffle in terms of order of result bytes, but on a type
100 /// whose vector element type is narrower than the original shuffle type.
101 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
102 SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
103 SDValue N1, SDValue N2,
104 ArrayRef<int> Mask) const;
105
106 bool LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
107 bool &NeedInvert, SDLoc dl);
108
109 SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
110 SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
111 unsigned NumOps, bool isSigned, SDLoc dl);
112
113 std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
114 SDNode *Node, bool isSigned);
115 SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
116 RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
117 RTLIB::Libcall Call_F128,
118 RTLIB::Libcall Call_PPCF128);
119 SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
120 RTLIB::Libcall Call_I8,
121 RTLIB::Libcall Call_I16,
122 RTLIB::Libcall Call_I32,
123 RTLIB::Libcall Call_I64,
124 RTLIB::Libcall Call_I128);
125 void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
126 void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
127
128 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, SDLoc dl);
129 SDValue ExpandBUILD_VECTOR(SDNode *Node);
130 SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
131 void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
132 SmallVectorImpl<SDValue> &Results);
133 SDValue ExpandFCOPYSIGN(SDNode *Node);
134 SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
135 SDLoc dl);
136 SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
137 SDLoc dl);
138 SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
139 SDLoc dl);
140
141 SDValue ExpandBSWAP(SDValue Op, SDLoc dl);
142 SDValue ExpandBitCount(unsigned Opc, SDValue Op, SDLoc dl);
143
144 SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
145 SDValue ExpandInsertToVectorThroughStack(SDValue Op);
146 SDValue ExpandVectorBuildThroughStack(SDNode* Node);
147
148 SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);
149
150 std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
151
152 void ExpandNode(SDNode *Node);
153 void PromoteNode(SDNode *Node);
154
155 public:
156 // Node replacement helpers
157 void ReplacedNode(SDNode *N) {
158 LegalizedNodes.erase(N);
159 if (UpdatedNodes)
160 UpdatedNodes->insert(N);
161 }
162 void ReplaceNode(SDNode *Old, SDNode *New) {
163 DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
164 dbgs() << " with: "; New->dump(&DAG));
165
166 assert(Old->getNumValues() == New->getNumValues() &&
167 "Replacing one node with another that produces a different number "
168 "of values!");
169 DAG.ReplaceAllUsesWith(Old, New);
170 for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i)
171 DAG.TransferDbgValues(SDValue(Old, i), SDValue(New, i));
172 if (UpdatedNodes)
173 UpdatedNodes->insert(New);
174 ReplacedNode(Old);
175 }
176 void ReplaceNode(SDValue Old, SDValue New) {
177 DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
178 dbgs() << " with: "; New->dump(&DAG));
179
180 DAG.ReplaceAllUsesWith(Old, New);
181 DAG.TransferDbgValues(Old, New);
182 if (UpdatedNodes)
183 UpdatedNodes->insert(New.getNode());
184 ReplacedNode(Old.getNode());
185 }
186 void ReplaceNode(SDNode *Old, const SDValue *New) {
187 DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));
188
189 DAG.ReplaceAllUsesWith(Old, New);
190 for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) {
191 DEBUG(dbgs() << (i == 0 ? " with: "
192 : " and: ");
193 New[i]->dump(&DAG));
194 DAG.TransferDbgValues(SDValue(Old, i), New[i]);
195 if (UpdatedNodes)
196 UpdatedNodes->insert(New[i].getNode());
197 }
198 ReplacedNode(Old);
199 }
200 };
201 }
202
203 /// Return a vector shuffle operation which
204 /// performs the same shuffle in terms of order of result bytes, but on a type
205 /// whose vector element type is narrower than the original shuffle type.
206 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
207 SDValue
208 SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
209 SDValue N1, SDValue N2,
210 ArrayRef<int> Mask) const {
211 unsigned NumMaskElts = VT.getVectorNumElements();
212 unsigned NumDestElts = NVT.getVectorNumElements();
213 unsigned NumEltsGrowth = NumDestElts / NumMaskElts;
214
215 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");
216
217 if (NumEltsGrowth == 1)
218 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);
219
220 SmallVector<int, 8> NewMask;
221 for (unsigned i = 0; i != NumMaskElts; ++i) {
222 int Idx = Mask[i];
223 for (unsigned j = 0; j != NumEltsGrowth; ++j) {
224 if (Idx < 0)
225 NewMask.push_back(-1);
226 else
227 NewMask.push_back(Idx * NumEltsGrowth + j);
228 }
229 }
230 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
231 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
232 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
233 }
234
235 /// Expands the ConstantFP node to an integer constant or
236 /// a load from the constant pool.
237 SDValue
238 SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
239 bool Extend = false;
240 SDLoc dl(CFP);
241
242 // If a FP immediate is precise when represented as a float and if the
243 // target can do an extending load from float to double, we put it into
244 // the constant pool as a float, even if it is statically typed as a
245 // double. This shrinks FP constants and canonicalizes them for targets where
246 // an FP extending load is the same cost as a normal load (such as on the x87
247 // fp stack or PPC FP unit).
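// For example (illustrative), f64 1.5 is exactly representable as an f32;
// if the target has a legal f32->f64 EXTLOAD and ShouldShrinkFPConstant
// returns true, the pool entry is emitted as a 4-byte float and reloaded
// with an ISD::EXTLOAD from MVT::f32 to MVT::f64.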
248 EVT VT = CFP->getValueType(0);
249 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
250 if (!UseCP) {
251 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
252 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
253 (VT == MVT::f64) ? MVT::i64 : MVT::i32);
254 }
255
256 EVT OrigVT = VT;
257 EVT SVT = VT;
258 while (SVT != MVT::f32 && SVT != MVT::f16) {
259 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
260 if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
261 // Only do this if the target has a native EXTLOAD instruction from
262 // smaller type.
263 TLI.isLoadExtLegal(ISD::EXTLOAD, OrigVT, SVT) &&
264 TLI.ShouldShrinkFPConstant(OrigVT)) {
265 Type *SType = SVT.getTypeForEVT(*DAG.getContext());
266 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
267 VT = SVT;
268 Extend = true;
269 }
270 }
271
272 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
273 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
274 if (Extend) {
275 SDValue Result =
276 DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
277 DAG.getEntryNode(),
278 CPIdx, MachinePointerInfo::getConstantPool(),
279 VT, false, false, false, Alignment);
280 return Result;
281 }
282 SDValue Result =
283 DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
284 MachinePointerInfo::getConstantPool(), false, false, false,
285 Alignment);
286 return Result;
287 }
288
289 /// Expands an unaligned store to 2 half-size stores.
290 static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
291 const TargetLowering &TLI,
292 SelectionDAGLegalize *DAGLegalize) {
293 assert(ST->getAddressingMode() == ISD::UNINDEXED &&
294 "unaligned indexed stores not implemented!");
295 SDValue Chain = ST->getChain();
296 SDValue Ptr = ST->getBasePtr();
297 SDValue Val = ST->getValue();
298 EVT VT = Val.getValueType();
299 int Alignment = ST->getAlignment();
300 unsigned AS = ST->getAddressSpace();
301
302 SDLoc dl(ST);
303 if (ST->getMemoryVT().isFloatingPoint() ||
304 ST->getMemoryVT().isVector()) {
305 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
306 if (TLI.isTypeLegal(intVT)) {
307 // Expand to a bitconvert of the value to the integer type of the
308 // same size, then a (misaligned) int store.
309 // FIXME: Does not handle truncating floating point stores!
310 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
311 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
312 ST->isVolatile(), ST->isNonTemporal(), Alignment);
313 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
314 return;
315 }
316 // Do an (aligned) store to a stack slot, then copy from the stack slot
317 // to the final destination using (unaligned) integer loads and stores.
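// For example (illustrative), a misaligned v4f32 store on a target with
// 32-bit integer registers is first stored to a 16-byte stack temporary
// and then copied to the real destination with four i32 loads and stores.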
318 EVT StoredVT = ST->getMemoryVT();
319 MVT RegVT =
320 TLI.getRegisterType(*DAG.getContext(),
321 EVT::getIntegerVT(*DAG.getContext(),
322 StoredVT.getSizeInBits()));
323 unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
324 unsigned RegBytes = RegVT.getSizeInBits() / 8;
325 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
326
327 // Make sure the stack slot is also aligned for the register type.
328 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
329
330 // Perform the original store, only redirected to the stack slot.
331 SDValue Store = DAG.getTruncStore(Chain, dl,
332 Val, StackPtr, MachinePointerInfo(),
333 StoredVT, false, false, 0);
334 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy(AS));
335 SmallVector<SDValue, 8> Stores;
336 unsigned Offset = 0;
337
338 // Do all but one of the copies using the full register width.
339 for (unsigned i = 1; i < NumRegs; i++) {
340 // Load one integer register's worth from the stack slot.
341 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
342 MachinePointerInfo(),
343 false, false, false, 0);
344 // Store it to the final location. Remember the store.
345 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
346 ST->getPointerInfo().getWithOffset(Offset),
347 ST->isVolatile(), ST->isNonTemporal(),
348 MinAlign(ST->getAlignment(), Offset)));
349 // Increment the pointers.
350 Offset += RegBytes;
351 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
352 Increment);
353 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
354 }
355
356 // The last store may be partial. Do a truncating store. On big-endian
357 // machines this requires an extending load from the stack slot to ensure
358 // that the bits are in the right place.
359 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
360 8 * (StoredBytes - Offset));
361
362 // Load from the stack slot.
363 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
364 MachinePointerInfo(),
365 MemVT, false, false, false, 0);
366
367 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
368 ST->getPointerInfo()
369 .getWithOffset(Offset),
370 MemVT, ST->isVolatile(),
371 ST->isNonTemporal(),
372 MinAlign(ST->getAlignment(), Offset),
373 ST->getAAInfo()));
374 // The order of the stores doesn't matter - say it with a TokenFactor.
375 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
376 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
377 return;
378 }
379 assert(ST->getMemoryVT().isInteger() &&
380 !ST->getMemoryVT().isVector() &&
381 "Unaligned store of unknown type.");
382 // Get the half-size VT
383 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
384 int NumBits = NewStoredVT.getSizeInBits();
385 int IncrementSize = NumBits / 8;
386
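// For example (illustrative), an i32 store with only 2-byte alignment is
// rewritten on a little-endian target as
//   truncstore i16 Val,          Ptr       ; low half
//   truncstore i16 (Val >> 16),  Ptr + 2   ; high half
// with the two stores joined by a TokenFactor.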
387 // Divide the stored value in two parts.
388 SDValue ShiftAmount = DAG.getConstant(NumBits,
389 TLI.getShiftAmountTy(Val.getValueType()));
390 SDValue Lo = Val;
391 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
392
393 // Store the two parts
394 SDValue Store1, Store2;
395 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
396 ST->getPointerInfo(), NewStoredVT,
397 ST->isVolatile(), ST->isNonTemporal(), Alignment);
398
399 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
400 DAG.getConstant(IncrementSize, TLI.getPointerTy(AS)));
401 Alignment = MinAlign(Alignment, IncrementSize);
402 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
403 ST->getPointerInfo().getWithOffset(IncrementSize),
404 NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
405 Alignment, ST->getAAInfo());
406
407 SDValue Result =
408 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
409 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
410 }
411
412 /// Expands an unaligned load to 2 half-size loads.
413 static void
414 ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
415 const TargetLowering &TLI,
416 SDValue &ValResult, SDValue &ChainResult) {
417 assert(LD->getAddressingMode() == ISD::UNINDEXED &&
418 "unaligned indexed loads not implemented!");
419 SDValue Chain = LD->getChain();
420 SDValue Ptr = LD->getBasePtr();
421 EVT VT = LD->getValueType(0);
422 EVT LoadedVT = LD->getMemoryVT();
423 SDLoc dl(LD);
424 if (VT.isFloatingPoint() || VT.isVector()) {
425 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
426 if (TLI.isTypeLegal(intVT) && TLI.isTypeLegal(LoadedVT)) {
427 // Expand to a (misaligned) integer load of the same size,
428 // then bitconvert to floating point or vector.
429 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
430 LD->getMemOperand());
431 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
432 if (LoadedVT != VT)
433 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
434 ISD::ANY_EXTEND, dl, VT, Result);
435
436 ValResult = Result;
437 ChainResult = Chain;
438 return;
439 }
440
441 // Copy the value to an (aligned) stack slot using (unaligned) integer
442 // loads and stores, then do an (aligned) load from the stack slot.
443 MVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
444 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
445 unsigned RegBytes = RegVT.getSizeInBits() / 8;
446 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
447
448 // Make sure the stack slot is also aligned for the register type.
449 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
450
451 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
452 SmallVector<SDValue, 8> Stores;
453 SDValue StackPtr = StackBase;
454 unsigned Offset = 0;
455
456 // Do all but one of the copies using the full register width.
457 for (unsigned i = 1; i < NumRegs; i++) {
458 // Load one integer register's worth from the original location.
459 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
460 LD->getPointerInfo().getWithOffset(Offset),
461 LD->isVolatile(), LD->isNonTemporal(),
462 LD->isInvariant(),
463 MinAlign(LD->getAlignment(), Offset),
464 LD->getAAInfo());
465 // Follow the load with a store to the stack slot. Remember the store.
466 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
467 MachinePointerInfo(), false, false, 0));
468 // Increment the pointers.
469 Offset += RegBytes;
470 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
471 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
472 Increment);
473 }
474
475 // The last copy may be partial. Do an extending load.
476 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
477 8 * (LoadedBytes - Offset));
478 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
479 LD->getPointerInfo().getWithOffset(Offset),
480 MemVT, LD->isVolatile(),
481 LD->isNonTemporal(),
482 LD->isInvariant(),
483 MinAlign(LD->getAlignment(), Offset),
484 LD->getAAInfo());
485 // Follow the load with a store to the stack slot. Remember the store.
486 // On big-endian machines this requires a truncating store to ensure
487 // that the bits end up in the right place.
488 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
489 MachinePointerInfo(), MemVT,
490 false, false, 0));
491
492 // The order of the stores doesn't matter - say it with a TokenFactor.
493 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
494
495 // Finally, perform the original load only redirected to the stack slot.
496 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
497 MachinePointerInfo(), LoadedVT, false,false, false,
498 0);
499
500 // Callers expect a MERGE_VALUES node.
501 ValResult = Load;
502 ChainResult = TF;
503 return;
504 }
505 assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
506 "Unaligned load of unsupported type.");
507
508 // Compute the new VT that is half the size of the old one. This is an
509 // integer MVT.
510 unsigned NumBits = LoadedVT.getSizeInBits();
511 EVT NewLoadedVT;
512 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
513 NumBits >>= 1;
514
515 unsigned Alignment = LD->getAlignment();
516 unsigned IncrementSize = NumBits / 8;
517 ISD::LoadExtType HiExtType = LD->getExtensionType();
518
519 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
520 if (HiExtType == ISD::NON_EXTLOAD)
521 HiExtType = ISD::ZEXTLOAD;
522
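// For example (illustrative), an unaligned i32 load becomes, on a
// little-endian target,
//   Lo = zextload i16 from Ptr
//   Hi = load i16 from Ptr + 2   ; extended according to HiExtType
//   Result = (Hi << 16) | Lo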
523 // Load the value in two parts
524 SDValue Lo, Hi;
525 if (TLI.isLittleEndian()) {
526 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
527 NewLoadedVT, LD->isVolatile(),
528 LD->isNonTemporal(), LD->isInvariant(), Alignment,
529 LD->getAAInfo());
530 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
531 DAG.getConstant(IncrementSize, Ptr.getValueType()));
532 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
533 LD->getPointerInfo().getWithOffset(IncrementSize),
534 NewLoadedVT, LD->isVolatile(),
535 LD->isNonTemporal(),LD->isInvariant(),
536 MinAlign(Alignment, IncrementSize), LD->getAAInfo());
537 } else {
538 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
539 NewLoadedVT, LD->isVolatile(),
540 LD->isNonTemporal(), LD->isInvariant(), Alignment,
541 LD->getAAInfo());
542 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
543 DAG.getConstant(IncrementSize, Ptr.getValueType()));
544 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
545 LD->getPointerInfo().getWithOffset(IncrementSize),
546 NewLoadedVT, LD->isVolatile(),
547 LD->isNonTemporal(), LD->isInvariant(),
548 MinAlign(Alignment, IncrementSize), LD->getAAInfo());
549 }
550
551 // Aggregate the two parts.
552 SDValue ShiftAmount = DAG.getConstant(NumBits,
553 TLI.getShiftAmountTy(Hi.getValueType()));
554 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
555 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
556
557 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
558 Hi.getValue(1));
559
560 ValResult = Result;
561 ChainResult = TF;
562 }
563
564 /// Some targets cannot handle a variable insertion index for the
565 /// INSERT_VECTOR_ELT instruction. In this case, it
566 /// is necessary to spill the vector being inserted into to memory, perform
567 /// the insert there, and then read the result back.
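/// For a v4i32 insert at a variable index this amounts to (illustrative):
///   store the <4 x i32> vector to a 16-byte stack slot,
///   store the i32 value at slot + idx*4,
///   reload the updated <4 x i32> vector from the slot.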
568 SDValue SelectionDAGLegalize::
569 PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
570 SDLoc dl) {
571 SDValue Tmp1 = Vec;
572 SDValue Tmp2 = Val;
573 SDValue Tmp3 = Idx;
574
575 // If the target doesn't support this, we have to spill the input vector
576 // to a temporary stack slot, update the element, then reload it. This is
577 // badness. We could also load the value into a vector register (either
578 // with a "move to register" or "extload into register" instruction, then
579 // permute it into place, if the idx is a constant and if the idx is
580 // supported by the target.
581 EVT VT = Tmp1.getValueType();
582 EVT EltVT = VT.getVectorElementType();
583 EVT IdxVT = Tmp3.getValueType();
584 EVT PtrVT = TLI.getPointerTy();
585 SDValue StackPtr = DAG.CreateStackTemporary(VT);
586
587 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
588
589 // Store the vector.
590 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
591 MachinePointerInfo::getFixedStack(SPFI),
592 false, false, 0);
593
594 // Truncate or zero extend offset to target pointer type.
595 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
596 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
597 // Add the offset to the index.
598 unsigned EltSize = EltVT.getSizeInBits()/8;
599 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
600 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
601 // Store the scalar value.
602 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
603 false, false, 0);
604 // Load the updated vector.
605 return DAG.getLoad(VT, dl, Ch, StackPtr,
606 MachinePointerInfo::getFixedStack(SPFI), false, false,
607 false, 0);
608 }
609
610
611 SDValue SelectionDAGLegalize::
612 ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, SDLoc dl) {
613 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
614 // SCALAR_TO_VECTOR requires that the type of the value being inserted
615 // match the element type of the vector being created, except for
616 // integers in which case the inserted value can be over width.
617 EVT EltVT = Vec.getValueType().getVectorElementType();
618 if (Val.getValueType() == EltVT ||
619 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
620 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
621 Vec.getValueType(), Val);
622
623 unsigned NumElts = Vec.getValueType().getVectorNumElements();
624 // We generate a shuffle of InVec and ScVec, so the shuffle mask
625 // should be 0,1,2,3,4,5... with the appropriate element replaced with
626 // elt 0 of the RHS.
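// For example (illustrative), inserting into element 2 of a v4i32 uses the
// mask <0, 1, 4, 3>, where index 4 selects element 0 of ScVec.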
627 SmallVector<int, 8> ShufOps;
628 for (unsigned i = 0; i != NumElts; ++i)
629 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);
630
631 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
632 &ShufOps[0]);
633 }
634 }
635 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
636 }
637
638 SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
639 // Turn 'store float 1.0, Ptr' -> 'store int 0x3F800000, Ptr'
640 // FIXME: We shouldn't do this for TargetConstantFP's.
641 // FIXME: move this to the DAG Combiner! Note that we can't regress due
642 // to phase ordering between legalized code and the dag combiner. This
643 // probably means that we need to integrate dag combiner and legalizer
644 // together.
645 // We generally can't do this one for long doubles.
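// For example (illustrative), on a target with only 32-bit integer
// registers,
//   store double 1.0, Ptr
// becomes two i32 stores: 0x00000000 at Ptr and 0x3FF00000 at Ptr+4
// (the halves are swapped for big-endian targets).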
646 SDValue Chain = ST->getChain();
647 SDValue Ptr = ST->getBasePtr();
648 unsigned Alignment = ST->getAlignment();
649 bool isVolatile = ST->isVolatile();
650 bool isNonTemporal = ST->isNonTemporal();
651 AAMDNodes AAInfo = ST->getAAInfo();
652 SDLoc dl(ST);
653 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
654 if (CFP->getValueType(0) == MVT::f32 &&
655 TLI.isTypeLegal(MVT::i32)) {
656 SDValue Con = DAG.getConstant(CFP->getValueAPF().
657 bitcastToAPInt().zextOrTrunc(32),
658 MVT::i32);
659 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
660 isVolatile, isNonTemporal, Alignment, AAInfo);
661 }
662
663 if (CFP->getValueType(0) == MVT::f64) {
664 // If this target supports 64-bit registers, do a single 64-bit store.
665 if (TLI.isTypeLegal(MVT::i64)) {
666 SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
667 zextOrTrunc(64), MVT::i64);
668 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
669 isVolatile, isNonTemporal, Alignment, AAInfo);
670 }
671
672 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) {
673 // Otherwise, if the target supports 32-bit registers, use 2 32-bit
674 // stores. If the target supports neither 32- nor 64-bits, this
675 // xform is certainly not worth it.
676 const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
677 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
678 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
679 if (TLI.isBigEndian()) std::swap(Lo, Hi);
680
681 Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile,
682 isNonTemporal, Alignment, AAInfo);
683 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
684 DAG.getConstant(4, Ptr.getValueType()));
685 Hi = DAG.getStore(Chain, dl, Hi, Ptr,
686 ST->getPointerInfo().getWithOffset(4),
687 isVolatile, isNonTemporal, MinAlign(Alignment, 4U),
688 AAInfo);
689
690 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
691 }
692 }
693 }
694 return SDValue(nullptr, 0);
695 }
696
697 void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
698 StoreSDNode *ST = cast<StoreSDNode>(Node);
699 SDValue Chain = ST->getChain();
700 SDValue Ptr = ST->getBasePtr();
701 SDLoc dl(Node);
702
703 unsigned Alignment = ST->getAlignment();
704 bool isVolatile = ST->isVolatile();
705 bool isNonTemporal = ST->isNonTemporal();
706 AAMDNodes AAInfo = ST->getAAInfo();
707
708 if (!ST->isTruncatingStore()) {
709 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
710 ReplaceNode(ST, OptStore);
711 return;
712 }
713
714 {
715 SDValue Value = ST->getValue();
716 MVT VT = Value.getSimpleValueType();
717 switch (TLI.getOperationAction(ISD::STORE, VT)) {
718 default: llvm_unreachable("This action is not supported yet!");
719 case TargetLowering::Legal: {
720 // If this is an unaligned store and the target doesn't support it,
721 // expand it.
722 unsigned AS = ST->getAddressSpace();
723 unsigned Align = ST->getAlignment();
724 if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
725 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
726 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
727 if (Align < ABIAlignment)
728 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
729 }
730 break;
731 }
732 case TargetLowering::Custom: {
733 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
734 if (Res && Res != SDValue(Node, 0))
735 ReplaceNode(SDValue(Node, 0), Res);
736 return;
737 }
738 case TargetLowering::Promote: {
739 MVT NVT = TLI.getTypeToPromoteTo(ISD::STORE, VT);
740 assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
741 "Can only promote stores to same size type");
742 Value = DAG.getNode(ISD::BITCAST, dl, NVT, Value);
743 SDValue Result =
744 DAG.getStore(Chain, dl, Value, Ptr,
745 ST->getPointerInfo(), isVolatile,
746 isNonTemporal, Alignment, AAInfo);
747 ReplaceNode(SDValue(Node, 0), Result);
748 break;
749 }
750 }
751 return;
752 }
753 } else {
754 SDValue Value = ST->getValue();
755
756 EVT StVT = ST->getMemoryVT();
757 unsigned StWidth = StVT.getSizeInBits();
758
759 if (StWidth != StVT.getStoreSizeInBits()) {
760 // Promote to a byte-sized store with upper bits zero if not
761 // storing an integral number of bytes. For example, promote
762 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
763 EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
764 StVT.getStoreSizeInBits());
765 Value = DAG.getZeroExtendInReg(Value, dl, StVT);
766 SDValue Result =
767 DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
768 NVT, isVolatile, isNonTemporal, Alignment, AAInfo);
769 ReplaceNode(SDValue(Node, 0), Result);
770 } else if (StWidth & (StWidth - 1)) {
771 // If not storing a power-of-2 number of bits, expand as two stores.
772 assert(!StVT.isVector() && "Unsupported truncstore!");
773 unsigned RoundWidth = 1 << Log2_32(StWidth);
774 assert(RoundWidth < StWidth);
775 unsigned ExtraWidth = StWidth - RoundWidth;
776 assert(ExtraWidth < RoundWidth);
777 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
778 "Store size not an integral number of bytes!");
779 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
780 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
781 SDValue Lo, Hi;
782 unsigned IncrementSize;
783
784 if (TLI.isLittleEndian()) {
785 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
786 // Store the bottom RoundWidth bits.
787 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
788 RoundVT,
789 isVolatile, isNonTemporal, Alignment,
790 AAInfo);
791
792 // Store the remaining ExtraWidth bits.
793 IncrementSize = RoundWidth / 8;
794 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
795 DAG.getConstant(IncrementSize, Ptr.getValueType()));
796 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
797 DAG.getConstant(RoundWidth,
798 TLI.getShiftAmountTy(Value.getValueType())));
799 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
800 ST->getPointerInfo().getWithOffset(IncrementSize),
801 ExtraVT, isVolatile, isNonTemporal,
802 MinAlign(Alignment, IncrementSize), AAInfo);
803 } else {
804 // Big endian - avoid unaligned stores.
805 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
806 // Store the top RoundWidth bits.
807 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
808 DAG.getConstant(ExtraWidth,
809 TLI.getShiftAmountTy(Value.getValueType())));
810 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(),
811 RoundVT, isVolatile, isNonTemporal, Alignment,
812 AAInfo);
813
814 // Store the remaining ExtraWidth bits.
815 IncrementSize = RoundWidth / 8;
816 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
817 DAG.getConstant(IncrementSize, Ptr.getValueType()));
818 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
819 ST->getPointerInfo().getWithOffset(IncrementSize),
820 ExtraVT, isVolatile, isNonTemporal,
821 MinAlign(Alignment, IncrementSize), AAInfo);
822 }
823
824 // The order of the stores doesn't matter.
825 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
826 ReplaceNode(SDValue(Node, 0), Result);
827 } else {
828 switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(),
829 StVT.getSimpleVT())) {
830 default: llvm_unreachable("This action is not supported yet!");
831 case TargetLowering::Legal: {
832 unsigned AS = ST->getAddressSpace();
833 unsigned Align = ST->getAlignment();
834 // If this is an unaligned store and the target doesn't support it,
835 // expand it.
836 if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
837 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
838 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
839 if (Align < ABIAlignment)
840 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
841 }
842 break;
843 }
844 case TargetLowering::Custom: {
845 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
846 if (Res && Res != SDValue(Node, 0))
847 ReplaceNode(SDValue(Node, 0), Res);
848 return;
849 }
850 case TargetLowering::Expand:
851 assert(!StVT.isVector() &&
852 "Vector Stores are handled in LegalizeVectorOps");
853
854 // TRUNCSTORE:i16 i32 -> STORE i16
855 assert(TLI.isTypeLegal(StVT) &&
856 "Do not know how to expand this store!");
857 Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value);
858 SDValue Result =
859 DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
860 isVolatile, isNonTemporal, Alignment, AAInfo);
861 ReplaceNode(SDValue(Node, 0), Result);
862 break;
863 }
864 }
865 }
866 }
867
868 void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
869 LoadSDNode *LD = cast<LoadSDNode>(Node);
870 SDValue Chain = LD->getChain(); // The chain.
871 SDValue Ptr = LD->getBasePtr(); // The base pointer.
872 SDValue Value; // The value returned by the load op.
873 SDLoc dl(Node);
874
875 ISD::LoadExtType ExtType = LD->getExtensionType();
876 if (ExtType == ISD::NON_EXTLOAD) {
877 MVT VT = Node->getSimpleValueType(0);
878 SDValue RVal = SDValue(Node, 0);
879 SDValue RChain = SDValue(Node, 1);
880
881 switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
882 default: llvm_unreachable("This action is not supported yet!");
883 case TargetLowering::Legal: {
884 unsigned AS = LD->getAddressSpace();
885 unsigned Align = LD->getAlignment();
886 // If this is an unaligned load and the target doesn't support it,
887 // expand it.
888 if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) {
889 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
890 unsigned ABIAlignment =
891 TLI.getDataLayout()->getABITypeAlignment(Ty);
892 if (Align < ABIAlignment){
893 ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
894 }
895 }
896 break;
897 }
898 case TargetLowering::Custom: {
899 SDValue Res = TLI.LowerOperation(RVal, DAG);
900 if (Res.getNode()) {
901 RVal = Res;
902 RChain = Res.getValue(1);
903 }
904 break;
905 }
906 case TargetLowering::Promote: {
907 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
908 assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
909 "Can only promote loads to same size type");
910
911 SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getMemOperand());
912 RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
913 RChain = Res.getValue(1);
914 break;
915 }
916 }
917 if (RChain.getNode() != Node) {
918 assert(RVal.getNode() != Node && "Load must be completely replaced");
919 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal);
920 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain);
921 if (UpdatedNodes) {
922 UpdatedNodes->insert(RVal.getNode());
923 UpdatedNodes->insert(RChain.getNode());
924 }
925 ReplacedNode(Node);
926 }
927 return;
928 }
929
930 EVT SrcVT = LD->getMemoryVT();
931 unsigned SrcWidth = SrcVT.getSizeInBits();
932 unsigned Alignment = LD->getAlignment();
933 bool isVolatile = LD->isVolatile();
934 bool isNonTemporal = LD->isNonTemporal();
935 bool isInvariant = LD->isInvariant();
936 AAMDNodes AAInfo = LD->getAAInfo();
937
938 if (SrcWidth != SrcVT.getStoreSizeInBits() &&
939 // Some targets pretend to have an i1 loading operation, and actually
940 // load an i8. This trick is correct for ZEXTLOAD because the top 7
941 // bits are guaranteed to be zero; it helps the optimizers understand
942 // that these bits are zero. It is also useful for EXTLOAD, since it
943 // tells the optimizers that those bits are undefined. It would be
944 // nice to have an effective generic way of getting these benefits...
945 // Until such a way is found, don't insist on promoting i1 here.
946 (SrcVT != MVT::i1 ||
947 TLI.getLoadExtAction(ExtType, Node->getValueType(0), MVT::i1) ==
948 TargetLowering::Promote)) {
949 // Promote to a byte-sized load if not loading an integral number of
950 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
951 unsigned NewWidth = SrcVT.getStoreSizeInBits();
952 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
953 SDValue Ch;
954
955 // The extra bits are guaranteed to be zero, since we stored them that
956 // way. A zext load from NVT thus automatically gives zext from SrcVT.
957
958 ISD::LoadExtType NewExtType =
959 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
960
961 SDValue Result =
962 DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
963 Chain, Ptr, LD->getPointerInfo(),
964 NVT, isVolatile, isNonTemporal, isInvariant, Alignment,
965 AAInfo);
966
967 Ch = Result.getValue(1); // The chain.
968
969 if (ExtType == ISD::SEXTLOAD)
970 // Having the top bits zero doesn't help when sign extending.
971 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
972 Result.getValueType(),
973 Result, DAG.getValueType(SrcVT));
974 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
975 // All the top bits are guaranteed to be zero - inform the optimizers.
976 Result = DAG.getNode(ISD::AssertZext, dl,
977 Result.getValueType(), Result,
978 DAG.getValueType(SrcVT));
979
980 Value = Result;
981 Chain = Ch;
982 } else if (SrcWidth & (SrcWidth - 1)) {
983 // If not loading a power-of-2 number of bits, expand as two loads.
984 assert(!SrcVT.isVector() && "Unsupported extload!");
985 unsigned RoundWidth = 1 << Log2_32(SrcWidth);
986 assert(RoundWidth < SrcWidth);
987 unsigned ExtraWidth = SrcWidth - RoundWidth;
988 assert(ExtraWidth < RoundWidth);
989 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
990 "Load size not an integral number of bytes!");
991 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
992 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
993 SDValue Lo, Hi, Ch;
994 unsigned IncrementSize;
995
996 if (TLI.isLittleEndian()) {
997 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
998 // Load the bottom RoundWidth bits.
999 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
1000 Chain, Ptr,
1001 LD->getPointerInfo(), RoundVT, isVolatile,
1002 isNonTemporal, isInvariant, Alignment, AAInfo);
1003
1004 // Load the remaining ExtraWidth bits.
1005 IncrementSize = RoundWidth / 8;
1006 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
1007 DAG.getConstant(IncrementSize, Ptr.getValueType()));
1008 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
1009 LD->getPointerInfo().getWithOffset(IncrementSize),
1010 ExtraVT, isVolatile, isNonTemporal, isInvariant,
1011 MinAlign(Alignment, IncrementSize), AAInfo);
1012
1013 // Build a factor node to remember that this load is independent of
1014 // the other one.
1015 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
1016 Hi.getValue(1));
1017
1018 // Move the top bits to the right place.
1019 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
1020 DAG.getConstant(RoundWidth,
1021 TLI.getShiftAmountTy(Hi.getValueType())));
1022
1023 // Join the hi and lo parts.
1024 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
1025 } else {
1026 // Big endian - avoid unaligned loads.
1027 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
1028 // Load the top RoundWidth bits.
1029 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
1030 LD->getPointerInfo(), RoundVT, isVolatile,
1031 isNonTemporal, isInvariant, Alignment, AAInfo);
1032
1033 // Load the remaining ExtraWidth bits.
1034 IncrementSize = RoundWidth / 8;
1035 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
1036 DAG.getConstant(IncrementSize, Ptr.getValueType()));
1037 Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
1038 dl, Node->getValueType(0), Chain, Ptr,
1039 LD->getPointerInfo().getWithOffset(IncrementSize),
1040 ExtraVT, isVolatile, isNonTemporal, isInvariant,
1041 MinAlign(Alignment, IncrementSize), AAInfo);
1042
1043 // Build a factor node to remember that this load is independent of
1044 // the other one.
1045 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
1046 Hi.getValue(1));
1047
1048 // Move the top bits to the right place.
1049 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
1050 DAG.getConstant(ExtraWidth,
1051 TLI.getShiftAmountTy(Hi.getValueType())));
1052
1053 // Join the hi and lo parts.
1054 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
1055 }
1056
1057 Chain = Ch;
1058 } else {
1059 bool isCustom = false;
1060 switch (TLI.getLoadExtAction(ExtType, Node->getValueType(0),
1061 SrcVT.getSimpleVT())) {
1062 default: llvm_unreachable("This action is not supported yet!");
1063 case TargetLowering::Custom:
1064 isCustom = true;
1065 // FALLTHROUGH
1066 case TargetLowering::Legal: {
1067 Value = SDValue(Node, 0);
1068 Chain = SDValue(Node, 1);
1069
1070 if (isCustom) {
1071 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
1072 if (Res.getNode()) {
1073 Value = Res;
1074 Chain = Res.getValue(1);
1075 }
1076 } else {
1077 // If this is an unaligned load and the target doesn't support
1078 // it, expand it.
1079 EVT MemVT = LD->getMemoryVT();
1080 unsigned AS = LD->getAddressSpace();
1081 unsigned Align = LD->getAlignment();
1082 if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) {
1083 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
1084 unsigned ABIAlignment = TLI.getDataLayout()->getABITypeAlignment(Ty);
1085 if (Align < ABIAlignment){
1086 ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain);
1087 }
1088 }
1089 }
1090 break;
1091 }
1092 case TargetLowering::Expand:
1093 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, Node->getValueType(0), SrcVT)) {
1094 // If the source type is not legal, see if there is a legal extload to
1095 // an intermediate type that we can then extend further.
1096 EVT LoadVT = TLI.getRegisterType(SrcVT.getSimpleVT());
1097 if (TLI.isTypeLegal(SrcVT) || // Same as SrcVT == LoadVT?
1098 TLI.isLoadExtLegal(ExtType, LoadVT, SrcVT)) {
1099 // If we are loading a legal type, this is a non-extload followed by a
1100 // full extend.
1101 ISD::LoadExtType MidExtType =
1102 (LoadVT == SrcVT) ? ISD::NON_EXTLOAD : ExtType;
1103
1104 SDValue Load = DAG.getExtLoad(MidExtType, dl, LoadVT, Chain, Ptr,
1105 SrcVT, LD->getMemOperand());
1106 unsigned ExtendOp =
1107 ISD::getExtForLoadExtType(SrcVT.isFloatingPoint(), ExtType);
1108 Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
1109 Chain = Load.getValue(1);
1110 break;
1111 }
1112 }
1113
1114 assert(!SrcVT.isVector() &&
1115 "Vector Loads are handled in LegalizeVectorOps");
1116
1117 // FIXME: This does not work for vectors on most targets. Sign-
1118 // and zero-extend operations are currently folded into extending
1119 // loads, whether they are legal or not, and then we end up here
1120 // without any support for legalizing them.
1121 assert(ExtType != ISD::EXTLOAD &&
1122 "EXTLOAD should always be supported!");
1123 // Turn the unsupported load into an EXTLOAD followed by an
1124 // explicit zero/sign extend inreg.
1125 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl,
1126 Node->getValueType(0),
1127 Chain, Ptr, SrcVT,
1128 LD->getMemOperand());
1129 SDValue ValRes;
1130 if (ExtType == ISD::SEXTLOAD)
1131 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
1132 Result.getValueType(),
1133 Result, DAG.getValueType(SrcVT));
1134 else
1135 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
1136 Value = ValRes;
1137 Chain = Result.getValue(1);
1138 break;
1139 }
1140 }
1141
1142 // Since loads produce two values, make sure to remember that we legalized
1143 // both of them.
1144 if (Chain.getNode() != Node) {
1145 assert(Value.getNode() != Node && "Load must be completely replaced");
1146 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value);
1147 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
1148 if (UpdatedNodes) {
1149 UpdatedNodes->insert(Value.getNode());
1150 UpdatedNodes->insert(Chain.getNode());
1151 }
1152 ReplacedNode(Node);
1153 }
1154 }
1155
1156 /// Return a legal replacement for the given operation, with all legal operands.
1157 void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
1158 DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG));
1159
1160 if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
1161 return;
1162
1163 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
1164 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
1165 TargetLowering::TypeLegal &&
1166 "Unexpected illegal type!");
1167
1168 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
1169 assert((TLI.getTypeAction(*DAG.getContext(),
1170 Node->getOperand(i).getValueType()) ==
1171 TargetLowering::TypeLegal ||
1172 Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
1173 "Unexpected illegal type!");
1174
1175 // Figure out the correct action; the way to query this varies by opcode
1176 TargetLowering::LegalizeAction Action = TargetLowering::Legal;
1177 bool SimpleFinishLegalizing = true;
1178 switch (Node->getOpcode()) {
1179 case ISD::INTRINSIC_W_CHAIN:
1180 case ISD::INTRINSIC_WO_CHAIN:
1181 case ISD::INTRINSIC_VOID:
1182 case ISD::STACKSAVE:
1183 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
1184 break;
1185 case ISD::VAARG:
1186 Action = TLI.getOperationAction(Node->getOpcode(),
1187 Node->getValueType(0));
1188 if (Action != TargetLowering::Promote)
1189 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
1190 break;
1191 case ISD::FP_TO_FP16:
1192 case ISD::SINT_TO_FP:
1193 case ISD::UINT_TO_FP:
1194 case ISD::EXTRACT_VECTOR_ELT:
1195 Action = TLI.getOperationAction(Node->getOpcode(),
1196 Node->getOperand(0).getValueType());
1197 break;
1198 case ISD::FP_ROUND_INREG:
1199 case ISD::SIGN_EXTEND_INREG: {
1200 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
1201 Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
1202 break;
1203 }
1204 case ISD::ATOMIC_STORE: {
1205 Action = TLI.getOperationAction(Node->getOpcode(),
1206 Node->getOperand(2).getValueType());
1207 break;
1208 }
1209 case ISD::SELECT_CC:
1210 case ISD::SETCC:
1211 case ISD::BR_CC: {
1212 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
1213 Node->getOpcode() == ISD::SETCC ? 2 : 1;
1214 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
1215 MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType();
1216 ISD::CondCode CCCode =
1217 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
1218 Action = TLI.getCondCodeAction(CCCode, OpVT);
1219 if (Action == TargetLowering::Legal) {
1220 if (Node->getOpcode() == ISD::SELECT_CC)
1221 Action = TLI.getOperationAction(Node->getOpcode(),
1222 Node->getValueType(0));
1223 else
1224 Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
1225 }
1226 break;
1227 }
1228 case ISD::LOAD:
1229 case ISD::STORE:
1230 // FIXME: Model these properly. LOAD and STORE are complicated, and
1231 // STORE expects the unlegalized operand in some cases.
1232 SimpleFinishLegalizing = false;
1233 break;
1234 case ISD::CALLSEQ_START:
1235 case ISD::CALLSEQ_END:
1236 // FIXME: This shouldn't be necessary. These nodes have special properties
1237 // dealing with the recursive nature of legalization. Removing this
1238 // special case should be done as part of making LegalizeDAG non-recursive.
1239 SimpleFinishLegalizing = false;
1240 break;
1241 case ISD::EXTRACT_ELEMENT:
1242 case ISD::FLT_ROUNDS_:
1243 case ISD::SADDO:
1244 case ISD::SSUBO:
1245 case ISD::UADDO:
1246 case ISD::USUBO:
1247 case ISD::SMULO:
1248 case ISD::UMULO:
1249 case ISD::FPOWI:
1250 case ISD::MERGE_VALUES:
1251 case ISD::EH_RETURN:
1252 case ISD::FRAME_TO_ARGS_OFFSET:
1253 case ISD::EH_SJLJ_SETJMP:
1254 case ISD::EH_SJLJ_LONGJMP:
1255 // These operations lie about being legal: when they claim to be legal,
1256 // they should actually be expanded.
1257 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1258 if (Action == TargetLowering::Legal)
1259 Action = TargetLowering::Expand;
1260 break;
1261 case ISD::INIT_TRAMPOLINE:
1262 case ISD::ADJUST_TRAMPOLINE:
1263 case ISD::FRAMEADDR:
1264 case ISD::RETURNADDR:
1265 // These operations lie about being legal: when they claim to be legal,
1266 // they should actually be custom-lowered.
1267 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1268 if (Action == TargetLowering::Legal)
1269 Action = TargetLowering::Custom;
1270 break;
1271 case ISD::READ_REGISTER:
1272 case ISD::WRITE_REGISTER:
1273 // Named registers are legal in the DAG, but blocked by register name
1274 // selection if not implemented by the target (to choose the correct
1275 // register). They'll be converted to Copy(To/From)Reg.
1276 Action = TargetLowering::Legal;
1277 break;
1278 case ISD::DEBUGTRAP:
1279 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1280 if (Action == TargetLowering::Expand) {
1281 // replace ISD::DEBUGTRAP with ISD::TRAP
1282 SDValue NewVal;
1283 NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
1284 Node->getOperand(0));
1285 ReplaceNode(Node, NewVal.getNode());
1286 LegalizeOp(NewVal.getNode());
1287 return;
1288 }
1289 break;
1290
1291 default:
1292 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
1293 Action = TargetLowering::Legal;
1294 } else {
1295 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1296 }
1297 break;
1298 }
1299
1300 if (SimpleFinishLegalizing) {
1301 SDNode *NewNode = Node;
1302 switch (Node->getOpcode()) {
1303 default: break;
1304 case ISD::SHL:
1305 case ISD::SRL:
1306 case ISD::SRA:
1307 case ISD::ROTL:
1308 case ISD::ROTR:
1309 // Legalizing shifts/rotates requires adjusting the shift amount
1310 // to the appropriate width.
1311 if (!Node->getOperand(1).getValueType().isVector()) {
1312 SDValue SAO =
1313 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
1314 Node->getOperand(1));
1315 HandleSDNode Handle(SAO);
1316 LegalizeOp(SAO.getNode());
1317 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
1318 Handle.getValue());
1319 }
1320 break;
1321 case ISD::SRL_PARTS:
1322 case ISD::SRA_PARTS:
1323 case ISD::SHL_PARTS:
1324 // Legalizing shifts/rotates requires adjusting the shift amount
1325 // to the appropriate width.
1326 if (!Node->getOperand(2).getValueType().isVector()) {
1327 SDValue SAO =
1328 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
1329 Node->getOperand(2));
1330 HandleSDNode Handle(SAO);
1331 LegalizeOp(SAO.getNode());
1332 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
1333 Node->getOperand(1),
1334 Handle.getValue());
1335 }
1336 break;
1337 }
1338
1339 if (NewNode != Node) {
1340 ReplaceNode(Node, NewNode);
1341 Node = NewNode;
1342 }
1343 switch (Action) {
1344 case TargetLowering::Legal:
1345 return;
1346 case TargetLowering::Custom: {
1347 // FIXME: The handling for custom lowering with multiple results is
1348 // a complete mess.
1349 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
1350 if (Res.getNode()) {
1351 if (!(Res.getNode() != Node || Res.getResNo() != 0))
1352 return;
1353
1354 if (Node->getNumValues() == 1) {
1355 // We can just directly replace this node with the lowered value.
1356 ReplaceNode(SDValue(Node, 0), Res);
1357 return;
1358 }
1359
1360 SmallVector<SDValue, 8> ResultVals;
1361 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
1362 ResultVals.push_back(Res.getValue(i));
1363 ReplaceNode(Node, ResultVals.data());
1364 return;
1365 }
1366 }
1367 // FALL THROUGH
1368 case TargetLowering::Expand:
1369 ExpandNode(Node);
1370 return;
1371 case TargetLowering::Promote:
1372 PromoteNode(Node);
1373 return;
1374 }
1375 }
1376
1377 switch (Node->getOpcode()) {
1378 default:
1379 #ifndef NDEBUG
1380 dbgs() << "NODE: ";
1381 Node->dump( &DAG);
1382 dbgs() << "\n";
1383 #endif
1384 llvm_unreachable("Do not know how to legalize this operator!");
1385
1386 case ISD::CALLSEQ_START:
1387 case ISD::CALLSEQ_END:
1388 break;
1389 case ISD::LOAD: {
1390 return LegalizeLoadOps(Node);
1391 }
1392 case ISD::STORE: {
1393 return LegalizeStoreOps(Node);
1394 }
1395 }
1396 }
1397
1398 SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
1399 SDValue Vec = Op.getOperand(0);
1400 SDValue Idx = Op.getOperand(1);
1401 SDLoc dl(Op);
1402
1403 // Before we generate a new store to a temporary stack slot, see if there is
1404 // already one that we can use. There often is because when we scalarize
1405 // vector operations (using SelectionDAG::UnrollVectorOp for example) a whole
1406 // series of EXTRACT_VECTOR_ELT nodes are generated, one for each element in
1407 // the vector. If all are expanded here, we don't want one store per vector
1408 // element.
1409 SDValue StackPtr, Ch;
1410 for (SDNode::use_iterator UI = Vec.getNode()->use_begin(),
1411 UE = Vec.getNode()->use_end(); UI != UE; ++UI) {
1412 SDNode *User = *UI;
1413 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) {
1414 if (ST->isIndexed() || ST->isTruncatingStore() ||
1415 ST->getValue() != Vec)
1416 continue;
1417
1418 // Make sure that nothing else could have stored into the destination of
1419 // this store.
1420 if (!ST->getChain().reachesChainWithoutSideEffects(DAG.getEntryNode()))
1421 continue;
1422
1423 StackPtr = ST->getBasePtr();
1424 Ch = SDValue(ST, 0);
1425 break;
1426 }
1427 }
1428
1429 if (!Ch.getNode()) {
1430 // Store the value to a temporary stack slot, then LOAD the returned part.
1431 StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
1432 Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
1433 MachinePointerInfo(), false, false, 0);
1434 }
1435
1436 // Add the offset to the index.
1437 unsigned EltSize =
1438 Vec.getValueType().getVectorElementType().getSizeInBits()/8;
1439 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
1440 DAG.getConstant(EltSize, Idx.getValueType()));
1441
1442 Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy());
1443 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
1444
1445 SDValue NewLoad;
1446
1447 if (Op.getValueType().isVector())
1448 NewLoad = DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,
1449 MachinePointerInfo(), false, false, false, 0);
1450 else
1451 NewLoad = DAG.getExtLoad(
1452 ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, MachinePointerInfo(),
1453 Vec.getValueType().getVectorElementType(), false, false, false, 0);
1454
1455   // Replace the chain going out of the store with the one out of the load.
1456 DAG.ReplaceAllUsesOfValueWith(Ch, SDValue(NewLoad.getNode(), 1));
1457
1458   // We introduced a cycle though, so update the load's operands, making sure
1459 // to use the original store's chain as an incoming chain.
1460 SmallVector<SDValue, 6> NewLoadOperands(NewLoad->op_begin(),
1461 NewLoad->op_end());
1462 NewLoadOperands[0] = Ch;
1463 NewLoad =
1464 SDValue(DAG.UpdateNodeOperands(NewLoad.getNode(), NewLoadOperands), 0);
1465 return NewLoad;
1466 }
1467
1468 SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
1469 assert(Op.getValueType().isVector() && "Non-vector insert subvector!");
1470
1471 SDValue Vec = Op.getOperand(0);
1472 SDValue Part = Op.getOperand(1);
1473 SDValue Idx = Op.getOperand(2);
1474 SDLoc dl(Op);
1475
1476   // Store the whole vector to a temporary stack slot, overwrite the inserted part, then reload the updated vector.
1477
1478 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
1479 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
1480 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
1481
1482 // First store the whole vector.
1483 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
1484 false, false, 0);
1485
1486 // Then store the inserted part.
1487
1488 // Add the offset to the index.
1489 unsigned EltSize =
1490 Vec.getValueType().getVectorElementType().getSizeInBits()/8;
1491
1492 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
1493 DAG.getConstant(EltSize, Idx.getValueType()));
1494 Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy());
1495
1496 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
1497 StackPtr);
1498
1499 // Store the subvector.
1500 Ch = DAG.getStore(Ch, dl, Part, SubStackPtr,
1501 MachinePointerInfo(), false, false, 0);
1502
1503 // Finally, load the updated vector.
1504 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo,
1505 false, false, false, 0);
1506 }
1507
1508 SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
1509 // We can't handle this case efficiently. Allocate a sufficiently
1510 // aligned object on the stack, store each element into it, then load
1511 // the result as a vector.
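  // For example, a v4i32 BUILD_VECTOR becomes four i32 stores at byte offsets
  // 0, 4, 8 and 12 into the slot, followed by a single v4i32 load.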
1512 // Create the stack frame object.
1513 EVT VT = Node->getValueType(0);
1514 EVT EltVT = VT.getVectorElementType();
1515 SDLoc dl(Node);
1516 SDValue FIPtr = DAG.CreateStackTemporary(VT);
1517 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
1518 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
1519
1520 // Emit a store of each element to the stack slot.
1521 SmallVector<SDValue, 8> Stores;
1522 unsigned TypeByteSize = EltVT.getSizeInBits() / 8;
1523 // Store (in the right endianness) the elements to memory.
1524 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
1525 // Ignore undef elements.
1526 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1527
1528 unsigned Offset = TypeByteSize*i;
1529
1530 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType());
1531 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx);
1532
1533 // If the destination vector element type is narrower than the source
1534 // element type, only store the bits necessary.
1535 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) {
1536 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
1537 Node->getOperand(i), Idx,
1538 PtrInfo.getWithOffset(Offset),
1539 EltVT, false, false, 0));
1540 } else
1541 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
1542 Node->getOperand(i), Idx,
1543 PtrInfo.getWithOffset(Offset),
1544 false, false, 0));
1545 }
1546
1547 SDValue StoreChain;
1548 if (!Stores.empty()) // Not all undef elements?
1549 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
1550 else
1551 StoreChain = DAG.getEntryNode();
1552
1553 // Result is a load from the stack slot.
1554 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo,
1555 false, false, false, 0);
1556 }
1557
1558 SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
1559 SDLoc dl(Node);
1560 SDValue Tmp1 = Node->getOperand(0);
1561 SDValue Tmp2 = Node->getOperand(1);
1562
1563 // Get the sign bit of the RHS. First obtain a value that has the same
1564 // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
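  // For the common IEEE types, when an integer type of the same width is
  // legal this reduces to a bitcast plus a signed compare against zero,
  // because the FP sign bit sits in the integer sign-bit position; e.g.
  // copysign(1.0, -2.0) ends up selecting -fabs(1.0) == -1.0 below.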
1565 SDValue SignBit;
1566 EVT FloatVT = Tmp2.getValueType();
1567 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
1568 if (TLI.isTypeLegal(IVT)) {
1569 // Convert to an integer with the same sign bit.
1570 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
1571 } else {
1572 // Store the float to memory, then load the sign part out as an integer.
1573 MVT LoadTy = TLI.getPointerTy();
1574 // First create a temporary that is aligned for both the load and store.
1575 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
1576 // Then store the float to it.
1577 SDValue Ch =
1578 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(),
1579 false, false, 0);
1580 if (TLI.isBigEndian()) {
1581 assert(FloatVT.isByteSized() && "Unsupported floating point type!");
1582 // Load out a legal integer with the same sign bit as the float.
1583 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
1584 false, false, false, 0);
1585 } else { // Little endian
1586 SDValue LoadPtr = StackPtr;
1587 // The float may be wider than the integer we are going to load. Advance
1588 // the pointer so that the loaded integer will contain the sign bit.
1589 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits();
1590 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8;
1591 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), LoadPtr,
1592 DAG.getConstant(ByteOffset, LoadPtr.getValueType()));
1593 // Load a legal integer containing the sign bit.
1594 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
1595 false, false, false, 0);
1596 // Move the sign bit to the top bit of the loaded integer.
1597 unsigned BitShift = LoadTy.getSizeInBits() -
1598 (FloatVT.getSizeInBits() - 8 * ByteOffset);
1599 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
1600 if (BitShift)
1601 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit,
1602 DAG.getConstant(BitShift,
1603 TLI.getShiftAmountTy(SignBit.getValueType())));
1604 }
1605 }
1606 // Now get the sign bit proper, by seeing whether the value is negative.
1607 SignBit = DAG.getSetCC(dl, getSetCCResultType(SignBit.getValueType()),
1608 SignBit, DAG.getConstant(0, SignBit.getValueType()),
1609 ISD::SETLT);
1610 // Get the absolute value of the result.
1611 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
1612 // Select between the nabs and abs value based on the sign bit of
1613 // the input.
1614 return DAG.getSelect(dl, AbsVal.getValueType(), SignBit,
1615 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
1616 AbsVal);
1617 }
1618
1619 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
1620 SmallVectorImpl<SDValue> &Results) {
1621 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1622 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
1623 " not tell us which reg is the stack pointer!");
1624 SDLoc dl(Node);
1625 EVT VT = Node->getValueType(0);
1626 SDValue Tmp1 = SDValue(Node, 0);
1627 SDValue Tmp2 = SDValue(Node, 1);
1628 SDValue Tmp3 = Node->getOperand(2);
1629 SDValue Chain = Tmp1.getOperand(0);
1630
1631 // Chain the dynamic stack allocation so that it doesn't modify the stack
1632 // pointer when other instructions are using the stack.
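  // The allocation below computes Tmp1 = SP - Size, rounded down with
  // Tmp1 & -Align when the requested alignment exceeds the target's stack
  // alignment; Tmp1 is returned as the allocated address and Tmp2 carries the
  // output chain of the bracketing CALLSEQ nodes.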
1633 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
1634 SDLoc(Node));
1635
1636 SDValue Size = Tmp2.getOperand(1);
1637 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
1638 Chain = SP.getValue(1);
1639 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
1640 unsigned StackAlign =
1641 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
1642 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
1643 if (Align > StackAlign)
1644 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
1645 DAG.getConstant(-(uint64_t)Align, VT));
1646 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
1647
1648 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
1649 DAG.getIntPtrConstant(0, true), SDValue(),
1650 SDLoc(Node));
1651
1652 Results.push_back(Tmp1);
1653 Results.push_back(Tmp2);
1654 }
1655
1656 /// Legalize a SETCC with the given LHS, RHS, and condition code CC on the
1657 /// current target.
1658 ///
1659 /// If the SETCC has been legalized using AND / OR, then the legalized node
1660 /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
1661 /// will be set to false.
1662 ///
1663 /// If the SETCC has been legalized by using getSetCCSwappedOperands(),
1664 /// then the values of LHS and RHS will be swapped, CC will be set to the
1665 /// new condition, and NeedInvert will be set to false.
1666 ///
1667 /// If the SETCC has been legalized using the inverse condcode, then LHS and
1668 /// RHS will be unchanged, CC will set to the inverted condcode, and NeedInvert
1669 /// will be set to true. The caller must invert the result of the SETCC with
1670 /// SelectionDAG::getLogicalNOT() or take equivalent action to swap the effect
1671 /// of a true/false result.
1672 ///
1673 /// \returns true if the SetCC has been legalized, false if it hasn't.
1674 bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
1675 SDValue &LHS, SDValue &RHS,
1676 SDValue &CC,
1677 bool &NeedInvert,
1678 SDLoc dl) {
1679 MVT OpVT = LHS.getSimpleValueType();
1680 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
1681 NeedInvert = false;
1682 switch (TLI.getCondCodeAction(CCCode, OpVT)) {
1683 default: llvm_unreachable("Unknown condition code action!");
1684 case TargetLowering::Legal:
1685 // Nothing to do.
1686 break;
1687 case TargetLowering::Expand: {
1688 ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode);
1689 if (TLI.isCondCodeLegal(InvCC, OpVT)) {
1690 std::swap(LHS, RHS);
1691 CC = DAG.getCondCode(InvCC);
1692 return true;
1693 }
1694 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
1695 unsigned Opc = 0;
1696 switch (CCCode) {
1697 default: llvm_unreachable("Don't know how to expand this condition!");
1698 case ISD::SETO:
1699 assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT)
1700 == TargetLowering::Legal
1701 && "If SETO is expanded, SETOEQ must be legal!");
1702 CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
1703 case ISD::SETUO:
1704 assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT)
1705 == TargetLowering::Legal
1706 && "If SETUO is expanded, SETUNE must be legal!");
1707 CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break;
1708 case ISD::SETOEQ:
1709 case ISD::SETOGT:
1710 case ISD::SETOGE:
1711 case ISD::SETOLT:
1712 case ISD::SETOLE:
1713 case ISD::SETONE:
1714 case ISD::SETUEQ:
1715 case ISD::SETUNE:
1716 case ISD::SETUGT:
1717 case ISD::SETUGE:
1718 case ISD::SETULT:
1719 case ISD::SETULE:
1720 // If we are floating point, assign and break, otherwise fall through.
1721 if (!OpVT.isInteger()) {
1722 // We can use the 4th bit to tell if we are the unordered
1723 // or ordered version of the opcode.
1724 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
1725 Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
1726 CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
1727 break;
1728 }
1729 // Fallthrough if we are unsigned integer.
1730 case ISD::SETLE:
1731 case ISD::SETGT:
1732 case ISD::SETGE:
1733 case ISD::SETLT:
1734       // These cases can only be expanded via the swapped-operand form tried
1735       // above; we have no other way to expand them.
1736 llvm_unreachable("Don't know how to expand this condition!");
1737 case ISD::SETNE:
1738 case ISD::SETEQ:
1739 // Try inverting the result of the inverse condition.
1740 InvCC = CCCode == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
1741 if (TLI.isCondCodeLegal(InvCC, OpVT)) {
1742 CC = DAG.getCondCode(InvCC);
1743 NeedInvert = true;
1744 return true;
1745 }
1746 // If inverting the condition didn't work then we have no means to expand
1747 // the condition.
1748 llvm_unreachable("Don't know how to expand this condition!");
1749 }
1750
1751 SDValue SetCC1, SetCC2;
1752 if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
1753       // If we aren't the ordered or unordered operation,
1754 // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
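      // For example, SETUGT on an FP type becomes
      //   (LHS SETGT RHS) OR (LHS SETUO RHS),
      // and SETOLT becomes (LHS SETLT RHS) AND (LHS SETO RHS).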
1755 SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
1756 SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
1757 } else {
1758 // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS)
1759 SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1);
1760 SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2);
1761 }
1762 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
1763 RHS = SDValue();
1764 CC = SDValue();
1765 return true;
1766 }
1767 }
1768 return false;
1769 }
1770
1771 /// Emit a store/load combination to the stack. This stores
1772 /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
1773 /// a load from the stack slot to DestVT, extending it if needed.
1774 /// The resultant code need not be legal.
1775 SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
1776 EVT SlotVT,
1777 EVT DestVT,
1778 SDLoc dl) {
1779 // Create the stack frame object.
1780 unsigned SrcAlign =
1781 TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
1782 getTypeForEVT(*DAG.getContext()));
1783 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
1784
1785 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
1786 int SPFI = StackPtrFI->getIndex();
1787 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
1788
1789 unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
1790 unsigned SlotSize = SlotVT.getSizeInBits();
1791 unsigned DestSize = DestVT.getSizeInBits();
1792 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
1793 unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
1794
1795   // Emit a store to the stack slot. Use a truncstore if the input value is
1796   // wider than SlotVT.
1797 SDValue Store;
1798
1799 if (SrcSize > SlotSize)
1800 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1801 PtrInfo, SlotVT, false, false, SrcAlign);
1802 else {
1803 assert(SrcSize == SlotSize && "Invalid store");
1804 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1805 PtrInfo, false, false, SrcAlign);
1806 }
1807
1808 // Result is a load from the stack slot.
1809 if (SlotSize == DestSize)
1810 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo,
1811 false, false, false, DestAlign);
1812
1813 assert(SlotSize < DestSize && "Unknown extension!");
1814 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr,
1815 PtrInfo, SlotVT, false, false, false, DestAlign);
1816 }
1817
1818 SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
1819 SDLoc dl(Node);
1820 // Create a vector sized/aligned stack slot, store the value to element #0,
1821 // then load the whole vector back out.
1822 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
1823
1824 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
1825 int SPFI = StackPtrFI->getIndex();
1826
1827 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
1828 StackPtr,
1829 MachinePointerInfo::getFixedStack(SPFI),
1830 Node->getValueType(0).getVectorElementType(),
1831 false, false, 0);
1832 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
1833 MachinePointerInfo::getFixedStack(SPFI),
1834 false, false, false, 0);
1835 }
1836
1837 static bool
1838 ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG,
1839 const TargetLowering &TLI, SDValue &Res) {
1840 unsigned NumElems = Node->getNumOperands();
1841 SDLoc dl(Node);
1842 EVT VT = Node->getValueType(0);
1843
1844 // Try to group the scalars into pairs, shuffle the pairs together, then
1845 // shuffle the pairs of pairs together, etc. until the vector has
1846 // been built. This will work only if all of the necessary shuffle masks
1847 // are legal.
1848
1849 // We do this in two phases; first to check the legality of the shuffles,
1850 // and next, assuming that all shuffles are legal, to create the new nodes.
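  // For example, building <a,b,c,d> first creates one SCALAR_TO_VECTOR per
  // defined scalar, shuffles them pairwise into <a,b,u,u> and <c,d,u,u>, and
  // then shuffles those two intermediates into the final <a,b,c,d>.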
1851 for (int Phase = 0; Phase < 2; ++Phase) {
1852 SmallVector<std::pair<SDValue, SmallVector<int, 16> >, 16> IntermedVals,
1853 NewIntermedVals;
1854 for (unsigned i = 0; i < NumElems; ++i) {
1855 SDValue V = Node->getOperand(i);
1856 if (V.getOpcode() == ISD::UNDEF)
1857 continue;
1858
1859 SDValue Vec;
1860 if (Phase)
1861 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V);
1862 IntermedVals.push_back(std::make_pair(Vec, SmallVector<int, 16>(1, i)));
1863 }
1864
1865 while (IntermedVals.size() > 2) {
1866 NewIntermedVals.clear();
1867 for (unsigned i = 0, e = (IntermedVals.size() & ~1u); i < e; i += 2) {
1868 // This vector and the next vector are shuffled together (simply to
1869 // append the one to the other).
1870 SmallVector<int, 16> ShuffleVec(NumElems, -1);
1871
1872 SmallVector<int, 16> FinalIndices;
1873 FinalIndices.reserve(IntermedVals[i].second.size() +
1874 IntermedVals[i+1].second.size());
1875
1876 int k = 0;
1877 for (unsigned j = 0, f = IntermedVals[i].second.size(); j != f;
1878 ++j, ++k) {
1879 ShuffleVec[k] = j;
1880 FinalIndices.push_back(IntermedVals[i].second[j]);
1881 }
1882 for (unsigned j = 0, f = IntermedVals[i+1].second.size(); j != f;
1883 ++j, ++k) {
1884 ShuffleVec[k] = NumElems + j;
1885 FinalIndices.push_back(IntermedVals[i+1].second[j]);
1886 }
1887
1888 SDValue Shuffle;
1889 if (Phase)
1890 Shuffle = DAG.getVectorShuffle(VT, dl, IntermedVals[i].first,
1891 IntermedVals[i+1].first,
1892 ShuffleVec.data());
1893 else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
1894 return false;
1895 NewIntermedVals.push_back(
1896 std::make_pair(Shuffle, std::move(FinalIndices)));
1897 }
1898
1899 // If we had an odd number of defined values, then append the last
1900 // element to the array of new vectors.
1901 if ((IntermedVals.size() & 1) != 0)
1902 NewIntermedVals.push_back(IntermedVals.back());
1903
1904 IntermedVals.swap(NewIntermedVals);
1905 }
1906
1907 assert(IntermedVals.size() <= 2 && IntermedVals.size() > 0 &&
1908 "Invalid number of intermediate vectors");
1909 SDValue Vec1 = IntermedVals[0].first;
1910 SDValue Vec2;
1911 if (IntermedVals.size() > 1)
1912 Vec2 = IntermedVals[1].first;
1913 else if (Phase)
1914 Vec2 = DAG.getUNDEF(VT);
1915
1916 SmallVector<int, 16> ShuffleVec(NumElems, -1);
1917 for (unsigned i = 0, e = IntermedVals[0].second.size(); i != e; ++i)
1918 ShuffleVec[IntermedVals[0].second[i]] = i;
1919 for (unsigned i = 0, e = IntermedVals[1].second.size(); i != e; ++i)
1920 ShuffleVec[IntermedVals[1].second[i]] = NumElems + i;
1921
1922 if (Phase)
1923 Res = DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
1924 else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
1925 return false;
1926 }
1927
1928 return true;
1929 }
1930
1931 /// Expand a BUILD_VECTOR node on targets that don't
1932 /// support the operation, but do support the resultant vector type.
1933 SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
1934 unsigned NumElems = Node->getNumOperands();
1935 SDValue Value1, Value2;
1936 SDLoc dl(Node);
1937 EVT VT = Node->getValueType(0);
1938 EVT OpVT = Node->getOperand(0).getValueType();
1939 EVT EltVT = VT.getVectorElementType();
1940
1941 // If the only non-undef value is the low element, turn this into a
1942 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
1943 bool isOnlyLowElement = true;
1944 bool MoreThanTwoValues = false;
1945 bool isConstant = true;
1946 for (unsigned i = 0; i < NumElems; ++i) {
1947 SDValue V = Node->getOperand(i);
1948 if (V.getOpcode() == ISD::UNDEF)
1949 continue;
1950 if (i > 0)
1951 isOnlyLowElement = false;
1952 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
1953 isConstant = false;
1954
1955 if (!Value1.getNode()) {
1956 Value1 = V;
1957 } else if (!Value2.getNode()) {
1958 if (V != Value1)
1959 Value2 = V;
1960 } else if (V != Value1 && V != Value2) {
1961 MoreThanTwoValues = true;
1962 }
1963 }
1964
1965 if (!Value1.getNode())
1966 return DAG.getUNDEF(VT);
1967
1968 if (isOnlyLowElement)
1969 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));
1970
1971 // If all elements are constants, create a load from the constant pool.
1972 if (isConstant) {
1973 SmallVector<Constant*, 16> CV;
1974 for (unsigned i = 0, e = NumElems; i != e; ++i) {
1975 if (ConstantFPSDNode *V =
1976 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
1977 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
1978 } else if (ConstantSDNode *V =
1979 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
1980 if (OpVT==EltVT)
1981 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
1982 else {
1983 // If OpVT and EltVT don't match, EltVT is not legal and the
1984 // element values have been promoted/truncated earlier. Undo this;
1985 // we don't want a v16i8 to become a v16i32 for example.
1986 const ConstantInt *CI = V->getConstantIntValue();
1987 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()),
1988 CI->getZExtValue()));
1989 }
1990 } else {
1991 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
1992 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
1993 CV.push_back(UndefValue::get(OpNTy));
1994 }
1995 }
1996 Constant *CP = ConstantVector::get(CV);
1997 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
1998 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
1999 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
2000 MachinePointerInfo::getConstantPool(),
2001 false, false, false, Alignment);
2002 }
2003
2004 SmallSet<SDValue, 16> DefinedValues;
2005 for (unsigned i = 0; i < NumElems; ++i) {
2006 if (Node->getOperand(i).getOpcode() == ISD::UNDEF)
2007 continue;
2008 DefinedValues.insert(Node->getOperand(i));
2009 }
2010
2011 if (TLI.shouldExpandBuildVectorWithShuffles(VT, DefinedValues.size())) {
2012 if (!MoreThanTwoValues) {
2013 SmallVector<int, 8> ShuffleVec(NumElems, -1);
2014 for (unsigned i = 0; i < NumElems; ++i) {
2015 SDValue V = Node->getOperand(i);
2016 if (V.getOpcode() == ISD::UNDEF)
2017 continue;
2018 ShuffleVec[i] = V == Value1 ? 0 : NumElems;
2019 }
2020 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
2021 // Get the splatted value into the low element of a vector register.
2022 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
2023 SDValue Vec2;
2024 if (Value2.getNode())
2025 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
2026 else
2027 Vec2 = DAG.getUNDEF(VT);
2028
2029 // Return shuffle(LowValVec, undef, <0,0,0,0>)
2030 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
2031 }
2032 } else {
2033 SDValue Res;
2034 if (ExpandBVWithShuffles(Node, DAG, TLI, Res))
2035 return Res;
2036 }
2037 }
2038
2039 // Otherwise, we can't handle this case efficiently.
2040 return ExpandVectorBuildThroughStack(Node);
2041 }
2042
2043 // Expand a node into a call to a libcall. If the result value
2044 // does not fit into a register, return the lo part and set the hi part to the
2045 // by-reg argument. If it does fit into a single register, return the result
2046 // and leave the Hi part unset.
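// For example, on a target without native 64-bit division an i64 UDIV node is
// typically expanded here into a call to the __udivdi3 runtime routine.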
2047 SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
2048 bool isSigned) {
2049 TargetLowering::ArgListTy Args;
2050 TargetLowering::ArgListEntry Entry;
2051 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
2052 EVT ArgVT = Node->getOperand(i).getValueType();
2053 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2054 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
2055 Entry.isSExt = isSigned;
2056 Entry.isZExt = !isSigned;
2057 Args.push_back(Entry);
2058 }
2059 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2060 TLI.getPointerTy());
2061
2062 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
2063
2064 // By default, the input chain to this libcall is the entry node of the
2065 // function. If the libcall is going to be emitted as a tail call then
2066 // TLI.isUsedByReturnOnly will change it to the right chain if the return
2067 // node which is being folded has a non-entry input chain.
2068 SDValue InChain = DAG.getEntryNode();
2069
2070   // isTailCall may be true since the callee does not reference the caller's
2071   // stack frame. Check if it's in the right position.
2072 SDValue TCChain = InChain;
2073 bool isTailCall = TLI.isInTailCallPosition(DAG, Node, TCChain);
2074 if (isTailCall)
2075 InChain = TCChain;
2076
2077 TargetLowering::CallLoweringInfo CLI(DAG);
2078 CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
2079 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
2080 .setTailCall(isTailCall).setSExtResult(isSigned).setZExtResult(!isSigned);
2081
2082 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2083
2084 if (!CallInfo.second.getNode())
2085 // It's a tailcall, return the chain (which is the DAG root).
2086 return DAG.getRoot();
2087
2088 return CallInfo.first;
2089 }
2090
2091 /// Generate a libcall taking the given operands as arguments
2092 /// and returning a result of type RetVT.
2093 SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
2094 const SDValue *Ops, unsigned NumOps,
2095 bool isSigned, SDLoc dl) {
2096 TargetLowering::ArgListTy Args;
2097 Args.reserve(NumOps);
2098
2099 TargetLowering::ArgListEntry Entry;
2100 for (unsigned i = 0; i != NumOps; ++i) {
2101 Entry.Node = Ops[i];
2102 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
2103 Entry.isSExt = isSigned;
2104 Entry.isZExt = !isSigned;
2105 Args.push_back(Entry);
2106 }
2107 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2108 TLI.getPointerTy());
2109
2110 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2111
2112 TargetLowering::CallLoweringInfo CLI(DAG);
2113 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
2114 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
2115 .setSExtResult(isSigned).setZExtResult(!isSigned);
2116
2117 std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
2118
2119 return CallInfo.first;
2120 }
2121
2122 // Expand a node into a call to a libcall. Similar to
2123 // ExpandLibCall except that the first operand is the in-chain.
2124 std::pair<SDValue, SDValue>
2125 SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
2126 SDNode *Node,
2127 bool isSigned) {
2128 SDValue InChain = Node->getOperand(0);
2129
2130 TargetLowering::ArgListTy Args;
2131 TargetLowering::ArgListEntry Entry;
2132 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
2133 EVT ArgVT = Node->getOperand(i).getValueType();
2134 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2135 Entry.Node = Node->getOperand(i);
2136 Entry.Ty = ArgTy;
2137 Entry.isSExt = isSigned;
2138 Entry.isZExt = !isSigned;
2139 Args.push_back(Entry);
2140 }
2141 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2142 TLI.getPointerTy());
2143
2144 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
2145
2146 TargetLowering::CallLoweringInfo CLI(DAG);
2147 CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
2148 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
2149 .setSExtResult(isSigned).setZExtResult(!isSigned);
2150
2151 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2152
2153 return CallInfo;
2154 }
2155
2156 SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
2157 RTLIB::Libcall Call_F32,
2158 RTLIB::Libcall Call_F64,
2159 RTLIB::Libcall Call_F80,
2160 RTLIB::Libcall Call_F128,
2161 RTLIB::Libcall Call_PPCF128) {
2162 RTLIB::Libcall LC;
2163 switch (Node->getSimpleValueType(0).SimpleTy) {
2164 default: llvm_unreachable("Unexpected request for libcall!");
2165 case MVT::f32: LC = Call_F32; break;
2166 case MVT::f64: LC = Call_F64; break;
2167 case MVT::f80: LC = Call_F80; break;
2168 case MVT::f128: LC = Call_F128; break;
2169 case MVT::ppcf128: LC = Call_PPCF128; break;
2170 }
2171 return ExpandLibCall(LC, Node, false);
2172 }
2173
2174 SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
2175 RTLIB::Libcall Call_I8,
2176 RTLIB::Libcall Call_I16,
2177 RTLIB::Libcall Call_I32,
2178 RTLIB::Libcall Call_I64,
2179 RTLIB::Libcall Call_I128) {
2180 RTLIB::Libcall LC;
2181 switch (Node->getSimpleValueType(0).SimpleTy) {
2182 default: llvm_unreachable("Unexpected request for libcall!");
2183 case MVT::i8: LC = Call_I8; break;
2184 case MVT::i16: LC = Call_I16; break;
2185 case MVT::i32: LC = Call_I32; break;
2186 case MVT::i64: LC = Call_I64; break;
2187 case MVT::i128: LC = Call_I128; break;
2188 }
2189 return ExpandLibCall(LC, Node, isSigned);
2190 }
2191
2192 /// Return true if divmod libcall is available.
2193 static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
2194 const TargetLowering &TLI) {
2195 RTLIB::Libcall LC;
2196 switch (Node->getSimpleValueType(0).SimpleTy) {
2197 default: llvm_unreachable("Unexpected request for libcall!");
2198 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
2199 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
2200 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
2201 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
2202 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
2203 }
2204
2205 return TLI.getLibcallName(LC) != nullptr;
2206 }
2207
2208 /// Only issue divrem libcall if both quotient and remainder are needed.
2209 static bool useDivRem(SDNode *Node, bool isSigned, bool isDIV) {
2210 // The other use might have been replaced with a divrem already.
2211 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
2212 unsigned OtherOpcode = 0;
2213 if (isSigned)
2214 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV;
2215 else
2216 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV;
2217
2218 SDValue Op0 = Node->getOperand(0);
2219 SDValue Op1 = Node->getOperand(1);
2220 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
2221 UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
2222 SDNode *User = *UI;
2223 if (User == Node)
2224 continue;
2225 if ((User->getOpcode() == OtherOpcode || User->getOpcode() == DivRemOpc) &&
2226 User->getOperand(0) == Op0 &&
2227 User->getOperand(1) == Op1)
2228 return true;
2229 }
2230 return false;
2231 }
2232
2233 /// Issue libcalls to __{u}divmod to compute div / rem pairs.
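/// The quotient is returned directly and the remainder is written through an
/// extra pointer argument to a stack slot, so a single call (for example
/// __divmodsi4 on targets that provide it) can serve both the SDIV and the
/// SREM of the same operands.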
2234 void
2235 SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
2236 SmallVectorImpl<SDValue> &Results) {
2237 unsigned Opcode = Node->getOpcode();
2238 bool isSigned = Opcode == ISD::SDIVREM;
2239
2240 RTLIB::Libcall LC;
2241 switch (Node->getSimpleValueType(0).SimpleTy) {
2242 default: llvm_unreachable("Unexpected request for libcall!");
2243 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
2244 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
2245 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
2246 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
2247 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
2248 }
2249
2250 // The input chain to this libcall is the entry node of the function.
2251 // Legalizing the call will automatically add the previous call to the
2252 // dependence.
2253 SDValue InChain = DAG.getEntryNode();
2254
2255 EVT RetVT = Node->getValueType(0);
2256 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2257
2258 TargetLowering::ArgListTy Args;
2259 TargetLowering::ArgListEntry Entry;
2260 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
2261 EVT ArgVT = Node->getOperand(i).getValueType();
2262 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2263 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
2264 Entry.isSExt = isSigned;
2265 Entry.isZExt = !isSigned;
2266 Args.push_back(Entry);
2267 }
2268
2269   // Also pass the address of the stack slot the remainder is returned in.
2270 SDValue FIPtr = DAG.CreateStackTemporary(RetVT);
2271 Entry.Node = FIPtr;
2272 Entry.Ty = RetTy->getPointerTo();
2273 Entry.isSExt = isSigned;
2274 Entry.isZExt = !isSigned;
2275 Args.push_back(Entry);
2276
2277 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2278 TLI.getPointerTy());
2279
2280 SDLoc dl(Node);
2281 TargetLowering::CallLoweringInfo CLI(DAG);
2282 CLI.setDebugLoc(dl).setChain(InChain)
2283 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
2284 .setSExtResult(isSigned).setZExtResult(!isSigned);
2285
2286 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2287
2288 // Remainder is loaded back from the stack frame.
2289 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr,
2290 MachinePointerInfo(), false, false, false, 0);
2291 Results.push_back(CallInfo.first);
2292 Results.push_back(Rem);
2293 }
2294
2295 /// Return true if sincos libcall is available.
2296 static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
2297 RTLIB::Libcall LC;
2298 switch (Node->getSimpleValueType(0).SimpleTy) {
2299 default: llvm_unreachable("Unexpected request for libcall!");
2300 case MVT::f32: LC = RTLIB::SINCOS_F32; break;
2301 case MVT::f64: LC = RTLIB::SINCOS_F64; break;
2302 case MVT::f80: LC = RTLIB::SINCOS_F80; break;
2303 case MVT::f128: LC = RTLIB::SINCOS_F128; break;
2304 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
2305 }
2306 return TLI.getLibcallName(LC) != nullptr;
2307 }
2308
2309 /// Return true if sincos libcall is available and can be used to combine sin
2310 /// and cos.
2311 static bool canCombineSinCosLibcall(SDNode *Node, const TargetLowering &TLI,
2312 const TargetMachine &TM) {
2313 if (!isSinCosLibcallAvailable(Node, TLI))
2314 return false;
2315 // GNU sin/cos functions set errno while sincos does not. Therefore
2316 // combining sin and cos is only safe if unsafe-fpmath is enabled.
2317 bool isGNU = Triple(TM.getTargetTriple()).getEnvironment() == Triple::GNU;
2318 if (isGNU && !TM.Options.UnsafeFPMath)
2319 return false;
2320 return true;
2321 }
2322
2323 /// Only issue sincos libcall if both sin and cos are needed.
2324 static bool useSinCos(SDNode *Node) {
2325 unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
2326 ? ISD::FCOS : ISD::FSIN;
2327
2328 SDValue Op0 = Node->getOperand(0);
2329 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
2330 UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
2331 SDNode *User = *UI;
2332 if (User == Node)
2333 continue;
2334 // The other user might have been turned into sincos already.
2335 if (User->getOpcode() == OtherOpcode || User->getOpcode() == ISD::FSINCOS)
2336 return true;
2337 }
2338 return false;
2339 }
2340
2341 /// Issue libcalls to sincos to compute sin / cos pairs.
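/// Both results come back through stack temporaries that are passed by
/// pointer, matching a sincos(x, &sin_out, &cos_out) style interface.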
2342 void
2343 SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
2344 SmallVectorImpl<SDValue> &Results) {
2345 RTLIB::Libcall LC;
2346 switch (Node->getSimpleValueType(0).SimpleTy) {
2347 default: llvm_unreachable("Unexpected request for libcall!");
2348 case MVT::f32: LC = RTLIB::SINCOS_F32; break;
2349 case MVT::f64: LC = RTLIB::SINCOS_F64; break;
2350 case MVT::f80: LC = RTLIB::SINCOS_F80; break;
2351 case MVT::f128: LC = RTLIB::SINCOS_F128; break;
2352 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
2353 }
2354
2355 // The input chain to this libcall is the entry node of the function.
2356 // Legalizing the call will automatically add the previous call to the
2357 // dependence.
2358 SDValue InChain = DAG.getEntryNode();
2359
2360 EVT RetVT = Node->getValueType(0);
2361 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2362
2363 TargetLowering::ArgListTy Args;
2364 TargetLowering::ArgListEntry Entry;
2365
2366 // Pass the argument.
2367 Entry.Node = Node->getOperand(0);
2368 Entry.Ty = RetTy;
2369 Entry.isSExt = false;
2370 Entry.isZExt = false;
2371 Args.push_back(Entry);
2372
2373   // Pass the address of the stack slot where sin is returned.
2374 SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
2375 Entry.Node = SinPtr;
2376 Entry.Ty = RetTy->getPointerTo();
2377 Entry.isSExt = false;
2378 Entry.isZExt = false;
2379 Args.push_back(Entry);
2380
2381   // Also pass the address of the stack slot where cos is returned.
2382 SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
2383 Entry.Node = CosPtr;
2384 Entry.Ty = RetTy->getPointerTo();
2385 Entry.isSExt = false;
2386 Entry.isZExt = false;
2387 Args.push_back(Entry);
2388
2389 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2390 TLI.getPointerTy());
2391
2392 SDLoc dl(Node);
2393 TargetLowering::CallLoweringInfo CLI(DAG);
2394 CLI.setDebugLoc(dl).setChain(InChain)
2395 .setCallee(TLI.getLibcallCallingConv(LC),
2396 Type::getVoidTy(*DAG.getContext()), Callee, std::move(Args), 0);
2397
2398 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2399
2400 Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr,
2401 MachinePointerInfo(), false, false, false, 0));
2402 Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr,
2403 MachinePointerInfo(), false, false, false, 0));
2404 }
2405
2406 /// This function is responsible for legalizing an
2407 /// INT_TO_FP operation of the specified operand when the target requests that
2408 /// we expand it. At this point, we know that the result and operand types are
2409 /// legal for the target.
2410 SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
2411 SDValue Op0,
2412 EVT DestVT,
2413 SDLoc dl) {
2414 if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
2415 // simple 32-bit [signed|unsigned] integer to float/double expansion
2416
2417     // Get the stack frame index of an 8-byte buffer.
2418 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);
2419
2420 // word offset constant for Hi/Lo address computation
2421 SDValue WordOff = DAG.getConstant(sizeof(int), StackSlot.getValueType());
2422 // set up Hi and Lo (into buffer) address based on endian
2423 SDValue Hi = StackSlot;
2424 SDValue Lo = DAG.getNode(ISD::ADD, dl, StackSlot.getValueType(),
2425 StackSlot, WordOff);
2426 if (TLI.isLittleEndian())
2427 std::swap(Hi, Lo);
2428
2429 // if signed map to unsigned space
2430 SDValue Op0Mapped;
2431 if (isSigned) {
2432 // constant used to invert sign bit (signed to unsigned mapping)
2433 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32);
2434 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit);
2435 } else {
2436 Op0Mapped = Op0;
2437 }
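    // The double is assembled in memory as hi=0x43300000, lo=x, i.e. the
    // value 2^52 + x (with x remapped to unsigned above); subtracting the
    // matching bias (2^52, or 2^52 + 2^31 for the signed case) then yields
    // (double)x exactly.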
2438 // store the lo of the constructed double - based on integer input
2439 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
2440 Op0Mapped, Lo, MachinePointerInfo(),
2441 false, false, 0);
2442 // initial hi portion of constructed double
2443 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
2444 // store the hi of the constructed double - biased exponent
2445 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi,
2446 MachinePointerInfo(),
2447 false, false, 0);
2448 // load the constructed double
2449 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot,
2450 MachinePointerInfo(), false, false, false, 0);
2451 // FP constant to bias correct the final result
2452 SDValue Bias = DAG.getConstantFP(isSigned ?
2453 BitsToDouble(0x4330000080000000ULL) :
2454 BitsToDouble(0x4330000000000000ULL),
2455 MVT::f64);
2456 // subtract the bias
2457 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
2458 // final result
2459 SDValue Result;
2460 // handle final rounding
2461 if (DestVT == MVT::f64) {
2462 // do nothing
2463 Result = Sub;
2464 } else if (DestVT.bitsLT(MVT::f64)) {
2465 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
2466 DAG.getIntPtrConstant(0));
2467 } else if (DestVT.bitsGT(MVT::f64)) {
2468 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
2469 }
2470 return Result;
2471 }
2472 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
2473 // Code below here assumes !isSigned without checking again.
2474
2475 // Implementation of unsigned i64 to f64 following the algorithm in
2476 // __floatundidf in compiler_rt. This implementation has the advantage
2477 // of performing rounding correctly, both in the default rounding mode
2478 // and in all alternate rounding modes.
2479 // TODO: Generalize this for use with other types.
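  // Roughly: LoFlt = 2^52 + Lo and HiFlt = 2^84 + Hi * 2^32 are built by
  // OR'ing the two halves into the mantissas of those constants; subtracting
  // (2^84 + 2^52) from HiFlt removes the 2^84 bias and pre-cancels the 2^52
  // bias in LoFlt, so the final FADD produces the sum with a single rounding.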
2480 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) {
2481 SDValue TwoP52 =
2482 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64);
2483 SDValue TwoP84PlusTwoP52 =
2484 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64);
2485 SDValue TwoP84 =
2486 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64);
2487
2488 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32);
2489 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0,
2490 DAG.getConstant(32, MVT::i64));
2491 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
2492 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
2493 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
2494 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
2495 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
2496 TwoP84PlusTwoP52);
2497 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
2498 }
2499
2500 // Implementation of unsigned i64 to f32.
2501 // TODO: Generalize this for use with other types.
2502 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
2503 // For unsigned conversions, convert them to signed conversions using the
2504 // algorithm from the x86_64 __floatundidf in compiler_rt.
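    // The idea: when the sign bit of Op0 is set, shift the value right by one
    // and OR the shifted-out bit back in as a sticky bit, convert that as
    // signed, and double the result; the sticky bit keeps the rounding of the
    // halved conversion consistent with converting the full value.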
2505 if (!isSigned) {
2506 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
2507
2508 SDValue ShiftConst =
2509 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType()));
2510 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
2511 SDValue AndConst = DAG.getConstant(1, MVT::i64);
2512 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
2513 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr);
2514
2515 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or);
2516 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt);
2517
2518 // TODO: This really should be implemented using a branch rather than a
2519 // select. We happen to get lucky and machinesink does the right
2520 // thing most of the time. This would be a good candidate for a
2521       // pseudo-op, or, even better, for whole-function isel.
2522 SDValue SignBitTest = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
2523 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT);
2524 return DAG.getSelect(dl, MVT::f32, SignBitTest, Slow, Fast);
2525 }
2526
2527 // Otherwise, implement the fully general conversion.
2528
2529 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
2530 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
2531 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
2532 DAG.getConstant(UINT64_C(0x800), MVT::i64));
2533 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
2534 DAG.getConstant(UINT64_C(0x7ff), MVT::i64));
2535 SDValue Ne = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
2536 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE);
2537 SDValue Sel = DAG.getSelect(dl, MVT::i64, Ne, Or, Op0);
2538 SDValue Ge = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
2539 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
2540 ISD::SETUGE);
2541 SDValue Sel2 = DAG.getSelect(dl, MVT::i64, Ge, Sel, Op0);
2542 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType());
2543
2544 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
2545 DAG.getConstant(32, SHVT));
2546 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh);
2547 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc);
2548 SDValue TwoP32 =
2549 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64);
2550 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt);
2551 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2);
2552 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo);
2553 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2);
2554 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd,
2555 DAG.getIntPtrConstant(0));
2556 }
2557
2558 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
2559
2560 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(Op0.getValueType()),
2561 Op0, DAG.getConstant(0, Op0.getValueType()),
2562 ISD::SETLT);
2563 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
2564 SDValue CstOffset = DAG.getSelect(dl, Zero.getValueType(),
2565 SignSet, Four, Zero);
2566
2567 // If the sign bit of the integer is set, the large number will be treated
2568 // as a negative number. To counteract this, the dynamic code adds an
2569 // offset depending on the data type.
2570 uint64_t FF;
2571 switch (Op0.getSimpleValueType().SimpleTy) {
2572 default: llvm_unreachable("Unsupported integer type!");
2573 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
2574 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
2575 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
2576 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
2577 }
2578 if (TLI.isLittleEndian()) FF <<= 32;
2579 Constant *FudgeFactor = ConstantInt::get(
2580 Type::getInt64Ty(*DAG.getContext()), FF);
2581
2582 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
2583 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
2584 CPIdx = DAG.getNode(ISD::ADD, dl, CPIdx.getValueType(), CPIdx, CstOffset);
2585 Alignment = std::min(Alignment, 4u);
2586 SDValue FudgeInReg;
2587 if (DestVT == MVT::f32)
2588 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
2589 MachinePointerInfo::getConstantPool(),
2590 false, false, false, Alignment);
2591 else {
2592 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
2593 DAG.getEntryNode(), CPIdx,
2594 MachinePointerInfo::getConstantPool(),
2595 MVT::f32, false, false, false, Alignment);
2596 HandleSDNode Handle(Load);
2597 LegalizeOp(Load.getNode());
2598 FudgeInReg = Handle.getValue();
2599 }
2600
2601 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
2602 }
2603
2604 /// This function is responsible for legalizing a
2605 /// *INT_TO_FP operation of the specified operand when the target requests that
2606 /// we promote it. At this point, we know that the result and operand types are
2607 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
2608 /// operation that takes a larger input.
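/// For example, an i8 UINT_TO_FP is typically widened to an i32 input and
/// performed as SINT_TO_FP, since zero-extension guarantees the wider value
/// is non-negative.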
2609 SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
2610 EVT DestVT,
2611 bool isSigned,
2612 SDLoc dl) {
2613 // First step, figure out the appropriate *INT_TO_FP operation to use.
2614 EVT NewInTy = LegalOp.getValueType();
2615
2616 unsigned OpToUse = 0;
2617
2618 // Scan for the appropriate larger type to use.
2619 while (1) {
2620 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
2621 assert(NewInTy.isInteger() && "Ran out of possibilities!");
2622
2623 // If the target supports SINT_TO_FP of this type, use it.
2624 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) {
2625 OpToUse = ISD::SINT_TO_FP;
2626 break;
2627 }
2628 if (isSigned) continue;
2629
2630 // If the target supports UINT_TO_FP of this type, use it.
2631 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) {
2632 OpToUse = ISD::UINT_TO_FP;
2633 break;
2634 }
2635
2636 // Otherwise, try a larger type.
2637 }
2638
2639   // Okay, we found the operation and type to use. Sign- or zero-extend our
2640   // input to the desired type, then run the operation on it.
2641 return DAG.getNode(OpToUse, dl, DestVT,
2642 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
2643 dl, NewInTy, LegalOp));
2644 }
2645
2646 /// This function is responsible for legalizing an
2647 /// FP_TO_*INT operation of the specified operand when the target requests that
2648 /// we promote it. At this point, we know that the result and operand types are
2649 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
2650 /// operation that returns a larger result.
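/// For example, an FP_TO_UINT producing i8 may instead be performed as an
/// FP_TO_SINT producing i32, followed by a TRUNCATE back to i8.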
2651 SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
2652 EVT DestVT,
2653 bool isSigned,
2654 SDLoc dl) {
2655 // First step, figure out the appropriate FP_TO*INT operation to use.
2656 EVT NewOutTy = DestVT;
2657
2658 unsigned OpToUse = 0;
2659
2660 // Scan for the appropriate larger type to use.
2661 while (1) {
2662 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
2663 assert(NewOutTy.isInteger() && "Ran out of possibilities!");
2664
2665 // A larger signed type can hold all unsigned values of the requested type,
2666     // so using FP_TO_SINT is valid.
2667 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
2668 OpToUse = ISD::FP_TO_SINT;
2669 break;
2670 }
2671
2672     // However, a signed conversion (the value may be < 0.0) *must* use FP_TO_SINT; only an unsigned conversion may use FP_TO_UINT here.
2673 if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
2674 OpToUse = ISD::FP_TO_UINT;
2675 break;
2676 }
2677
2678 // Otherwise, try a larger type.
2679 }
2680
2681
2682 // Okay, we found the operation and type to use.
2683 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);
2684
2685 // Truncate the result of the extended FP_TO_*INT operation to the desired
2686 // size.
2687 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
2688 }
2689
2690 /// Open code the operations for BSWAP of the specified operand.
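/// For i32 this builds four shifts, two masks and three ORs; for example
/// bswap(0x00112233) yields 0x33221100.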
2691 SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, SDLoc dl) {
2692 EVT VT = Op.getValueType();
2693 EVT SHVT = TLI.getShiftAmountTy(VT);
2694 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
2695 switch (VT.getSimpleVT().SimpleTy) {
2696 default: llvm_unreachable("Unhandled Expand type in BSWAP!");
2697 case MVT::i16:
2698 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
2699 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
2700 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2701 case MVT::i32:
2702 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
2703 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
2704 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
2705 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
2706 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT));
2707 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT));
2708 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
2709 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
2710 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
2711 case MVT::i64:
2712 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT));
2713 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT));
2714 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
2715 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
2716 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
2717 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
2718 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT));
2719 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT));
2720 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT));
2721 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT));
2722 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT));
2723 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT));
2724 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT));
2725 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT));
2726 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
2727 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
2728 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
2729 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
2730 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
2731 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
2732 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
2733 }
2734 }
2735
2736 /// Expand the specified bitcount instruction into operations.
2737 SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
2738 SDLoc dl) {
2739 switch (Opc) {
2740 default: llvm_unreachable("Cannot expand this yet!");
2741 case ISD::CTPOP: {
2742 EVT VT = Op.getValueType();
2743 EVT ShVT = TLI.getShiftAmountTy(VT);
2744 unsigned Len = VT.getSizeInBits();
2745
2746 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 &&
2747 "CTPOP not implemented for this type.");
2748
2749 // This is the "best" algorithm from
2750 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
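    // For a 32-bit value the splatted masks below are 0x55555555, 0x33333333,
    // 0x0F0F0F0F and 0x01010101; the multiply accumulates the per-byte counts
    // into the top byte, which the final shift by Len - 8 (24 here) moves down
    // to the low bits.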
2751
2752 SDValue Mask55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), VT);
2753 SDValue Mask33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), VT);
2754 SDValue Mask0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), VT);
2755 SDValue Mask01 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), VT);
2756
2757 // v = v - ((v >> 1) & 0x55555555...)
2758 Op = DAG.getNode(ISD::SUB, dl, VT, Op,
2759 DAG.getNode(ISD::AND, dl, VT,
2760 DAG.getNode(ISD::SRL, dl, VT, Op,
2761 DAG.getConstant(1, ShVT)),
2762 Mask55));
2763 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
2764 Op = DAG.getNode(ISD::ADD, dl, VT,
2765 DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
2766 DAG.getNode(ISD::AND, dl, VT,
2767 DAG.getNode(ISD::SRL, dl, VT, Op,
2768 DAG.getConstant(2, ShVT)),
2769 Mask33));
2770 // v = (v + (v >> 4)) & 0x0F0F0F0F...
2771 Op = DAG.getNode(ISD::AND, dl, VT,
2772 DAG.getNode(ISD::ADD, dl, VT, Op,
2773 DAG.getNode(ISD::SRL, dl, VT, Op,
2774 DAG.getConstant(4, ShVT))),
2775 Mask0F);
2776 // v = (v * 0x01010101...) >> (Len - 8)
2777 Op = DAG.getNode(ISD::SRL, dl, VT,
2778 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
2779 DAG.getConstant(Len - 8, ShVT));
2780
2781 return Op;
2782 }
2783 case ISD::CTLZ_ZERO_UNDEF:
2784 // This trivially expands to CTLZ.
2785 return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op);
2786 case ISD::CTLZ: {
2787 // for now, we do this:
2788 // x = x | (x >> 1);
2789 // x = x | (x >> 2);
2790 // ...
2791 // x = x | (x >>16);
2792 // x = x | (x >>32); // for 64-bit input
2793 // return popcount(~x);
2794 //
2795 // Ref: "Hacker's Delight" by Henry Warren
2796 EVT VT = Op.getValueType();
2797 EVT ShVT = TLI.getShiftAmountTy(VT);
2798 unsigned len = VT.getSizeInBits();
2799 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
2800 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
2801 Op = DAG.getNode(ISD::OR, dl, VT, Op,
2802 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3));
2803 }
2804 Op = DAG.getNOT(dl, Op, VT);
2805 return DAG.getNode(ISD::CTPOP, dl, VT, Op);
2806 }
2807 case ISD::CTTZ_ZERO_UNDEF:
2808 // This trivially expands to CTTZ.
2809 return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op);
2810 case ISD::CTTZ: {
2811 // for now, we use: { return popcount(~x & (x - 1)); }
2812 // unless the target has ctlz but not ctpop, in which case we use:
2813 // { return 32 - nlz(~x & (x-1)); }
2814 // Ref: "Hacker's Delight" by Henry Warren
2815 EVT VT = Op.getValueType();
2816 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT,
2817 DAG.getNOT(dl, Op, VT),
2818 DAG.getNode(ISD::SUB, dl, VT, Op,
2819 DAG.getConstant(1, VT)));
2820 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
2821 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) &&
2822 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT))
2823 return DAG.getNode(ISD::SUB, dl, VT,
2824 DAG.getConstant(VT.getSizeInBits(), VT),
2825 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3));
2826 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3);
2827 }
2828 }
2829 }
2830
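/// Expand an atomic node into the matching chain libcall, selected from the
/// node's opcode and memory VT.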
2831 std::pair<SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
2832 unsigned Opc = Node->getOpcode();
2833 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
2834 RTLIB::Libcall LC = RTLIB::getATOMIC(Opc, VT);
2835 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected atomic op or value type!");
2836
2837 return ExpandChainLibCall(LC, Node, false);
2838 }
2839
2840 void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
2841 SmallVector<SDValue, 8> Results;
2842 SDLoc dl(Node);
2843 SDValue Tmp1, Tmp2, Tmp3, Tmp4;
2844 bool NeedInvert;
2845 switch (Node->getOpcode()) {
2846 case ISD::CTPOP:
2847 case ISD::CTLZ:
2848 case ISD::CTLZ_ZERO_UNDEF:
2849 case ISD::CTTZ:
2850 case ISD::CTTZ_ZERO_UNDEF:
2851 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
2852 Results.push_back(Tmp1);
2853 break;
2854 case ISD::BSWAP:
2855 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
2856 break;
2857 case ISD::FRAMEADDR:
2858 case ISD::RETURNADDR:
2859 case ISD::FRAME_TO_ARGS_OFFSET:
2860 Results.push_back(DAG.getConstant(0, Node->getValueType(0)));
2861 break;
2862 case ISD::FLT_ROUNDS_:
2863 Results.push_back(DAG.getConstant(1, Node->getValueType(0)));
2864 break;
2865 case ISD::EH_RETURN:
2866 case ISD::EH_LABEL:
2867 case ISD::PREFETCH:
2868 case ISD::VAEND:
2869 case ISD::EH_SJLJ_LONGJMP:
2870 // If the target didn't expand these, there's nothing to do, so just
2871 // preserve the chain and be done.
2872 Results.push_back(Node->getOperand(0));
2873 break;
2874 case ISD::EH_SJLJ_SETJMP:
2875 // If the target didn't expand this, just return 'zero' and preserve the
2876 // chain.
2877 Results.push_back(DAG.getConstant(0, MVT::i32));
2878 Results.push_back(Node->getOperand(0));
2879 break;
2880 case ISD::ATOMIC_FENCE: {
2881 // If the target didn't lower this, lower it to '__sync_synchronize()' call
2882 // FIXME: handle "fence singlethread" more efficiently.
2883 TargetLowering::ArgListTy Args;
2884
2885 TargetLowering::CallLoweringInfo CLI(DAG);
2886 CLI.setDebugLoc(dl).setChain(Node->getOperand(0))
2887 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
2888 DAG.getExternalSymbol("__sync_synchronize",
2889 TLI.getPointerTy()), std::move(Args), 0);
2890
2891 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
2892
2893 Results.push_back(CallResult.second);
2894 break;
2895 }
2896 case ISD::ATOMIC_LOAD: {
2897 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP.
2898 SDValue Zero = DAG.getConstant(0, Node->getValueType(0));
2899 SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
2900 SDValue Swap = DAG.getAtomicCmpSwap(
2901 ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
2902 Node->getOperand(0), Node->getOperand(1), Zero, Zero,
2903 cast<AtomicSDNode>(Node)->getMemOperand(),
2904 cast<AtomicSDNode>(Node)->getOrdering(),
2905 cast<AtomicSDNode>(Node)->getOrdering(),
2906 cast<AtomicSDNode>(Node)->getSynchScope());
2907 Results.push_back(Swap.getValue(0));
2908 Results.push_back(Swap.getValue(1));
2909 break;
2910 }
2911 case ISD::ATOMIC_STORE: {
2912 // There is no libcall for atomic store; fake it with ATOMIC_SWAP.
2913 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
2914 cast<AtomicSDNode>(Node)->getMemoryVT(),
2915 Node->getOperand(0),
2916 Node->getOperand(1), Node->getOperand(2),
2917 cast<AtomicSDNode>(Node)->getMemOperand(),
2918 cast<AtomicSDNode>(Node)->getOrdering(),
2919 cast<AtomicSDNode>(Node)->getSynchScope());
2920 Results.push_back(Swap.getValue(1));
2921 break;
2922 }
2923 // By default, atomic intrinsics are marked Legal and lowered. Targets
2924 // which don't support them directly, however, may want libcalls, in which
2925 // case they mark them Expand, and we get here.
2926 case ISD::ATOMIC_SWAP:
2927 case ISD::ATOMIC_LOAD_ADD:
2928 case ISD::ATOMIC_LOAD_SUB:
2929 case ISD::ATOMIC_LOAD_AND:
2930 case ISD::ATOMIC_LOAD_OR:
2931 case ISD::ATOMIC_LOAD_XOR:
2932 case ISD::ATOMIC_LOAD_NAND:
2933 case ISD::ATOMIC_LOAD_MIN:
2934 case ISD::ATOMIC_LOAD_MAX:
2935 case ISD::ATOMIC_LOAD_UMIN:
2936 case ISD::ATOMIC_LOAD_UMAX:
2937 case ISD::ATOMIC_CMP_SWAP: {
2938 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
2939 Results.push_back(Tmp.first);
2940 Results.push_back(Tmp.second);
2941 break;
2942 }
2943 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
2944 // Expanding an ATOMIC_CMP_SWAP_WITH_SUCCESS produces an ATOMIC_CMP_SWAP and
2945 // splits out the success value as a comparison. Expanding the resulting
2946 // ATOMIC_CMP_SWAP will produce a libcall.
2947 SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
2948 SDValue Res = DAG.getAtomicCmpSwap(
2949 ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
2950 Node->getOperand(0), Node->getOperand(1), Node->getOperand(2),
2951 Node->getOperand(3), cast<MemSDNode>(Node)->getMemOperand(),
2952 cast<AtomicSDNode>(Node)->getSuccessOrdering(),
2953 cast<AtomicSDNode>(Node)->getFailureOrdering(),
2954 cast<AtomicSDNode>(Node)->getSynchScope());
2955
2956 SDValue Success = DAG.getSetCC(SDLoc(Node), Node->getValueType(1),
2957 Res, Node->getOperand(2), ISD::SETEQ);
2958
2959 Results.push_back(Res.getValue(0));
2960 Results.push_back(Success);
2961 Results.push_back(Res.getValue(1));
2962 break;
2963 }
2964 case ISD::DYNAMIC_STACKALLOC:
2965 ExpandDYNAMIC_STACKALLOC(Node, Results);
2966 break;
2967 case ISD::MERGE_VALUES:
2968 for (unsigned i = 0; i < Node->getNumValues(); i++)
2969 Results.push_back(Node->getOperand(i));
2970 break;
2971 case ISD::UNDEF: {
2972 EVT VT = Node->getValueType(0);
2973 if (VT.isInteger())
2974 Results.push_back(DAG.getConstant(0, VT));
2975 else {
2976 assert(VT.isFloatingPoint() && "Unknown value type!");
2977 Results.push_back(DAG.getConstantFP(0, VT));
2978 }
2979 break;
2980 }
2981 case ISD::TRAP: {
2982 // If this operation is not supported, lower it to 'abort()' call
2983 TargetLowering::ArgListTy Args;
2984 TargetLowering::CallLoweringInfo CLI(DAG);
2985 CLI.setDebugLoc(dl).setChain(Node->getOperand(0))
2986 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
2987 DAG.getExternalSymbol("abort", TLI.getPointerTy()),
2988 std::move(Args), 0);
2989 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
2990
2991 Results.push_back(CallResult.second);
2992 break;
2993 }
2994 case ISD::FP_ROUND:
2995 case ISD::BITCAST:
2996 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
2997 Node->getValueType(0), dl);
2998 Results.push_back(Tmp1);
2999 break;
3000 case ISD::FP_EXTEND:
3001 Tmp1 = EmitStackConvert(Node->getOperand(0),
3002 Node->getOperand(0).getValueType(),
3003 Node->getValueType(0), dl);
3004 Results.push_back(Tmp1);
3005 break;
3006 case ISD::SIGN_EXTEND_INREG: {
3007 // NOTE: we could fall back on load/store here too for targets without
3008 // SAR. However, it is doubtful that any exist.
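// Sign-extend in register by shifting the value left until the sign bit of
// ExtraVT becomes the MSB, then arithmetic-shifting it back down.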
3009 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
3010 EVT VT = Node->getValueType(0);
3011 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT);
3012 if (VT.isVector())
3013 ShiftAmountTy = VT;
3014 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
3015 ExtraVT.getScalarType().getSizeInBits();
3016 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy);
3017 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
3018 Node->getOperand(0), ShiftCst);
3019 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
3020 Results.push_back(Tmp1);
3021 break;
3022 }
3023 case ISD::FP_ROUND_INREG: {
3024 // The only way we can lower this is to turn it into a TRUNCSTORE,
3025 // EXTLOAD pair, targeting a temporary location (a stack slot).
3026
3027 // NOTE: there is a choice here between constantly creating new stack
3028 // slots and always reusing the same one. We currently always create
3029 // new ones, as reuse may inhibit scheduling.
3030 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
3031 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
3032 Node->getValueType(0), dl);
3033 Results.push_back(Tmp1);
3034 break;
3035 }
3036 case ISD::SINT_TO_FP:
3037 case ISD::UINT_TO_FP:
3038 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
3039 Node->getOperand(0), Node->getValueType(0), dl);
3040 Results.push_back(Tmp1);
3041 break;
3042 case ISD::FP_TO_SINT:
3043 if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG))
3044 Results.push_back(Tmp1);
3045 break;
3046 case ISD::FP_TO_UINT: {
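// Expand FP_TO_UINT in terms of FP_TO_SINT:
//   if (x < 2^(N-1))  return (uint)(int)x;
//   else              return (uint)(int)(x - 2^(N-1)) ^ (1 << (N-1));
// where N is the bit width of the integer result.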
3047 SDValue True, False;
3048 EVT VT = Node->getOperand(0).getValueType();
3049 EVT NVT = Node->getValueType(0);
3050 APFloat apf(DAG.EVTToAPFloatSemantics(VT),
3051 APInt::getNullValue(VT.getSizeInBits()));
3052 APInt x = APInt::getSignBit(NVT.getSizeInBits());
3053 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
3054 Tmp1 = DAG.getConstantFP(apf, VT);
3055 Tmp2 = DAG.getSetCC(dl, getSetCCResultType(VT),
3056 Node->getOperand(0),
3057 Tmp1, ISD::SETLT);
3058 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
3059 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
3060 DAG.getNode(ISD::FSUB, dl, VT,
3061 Node->getOperand(0), Tmp1));
3062 False = DAG.getNode(ISD::XOR, dl, NVT, False,
3063 DAG.getConstant(x, NVT));
3064 Tmp1 = DAG.getSelect(dl, NVT, Tmp2, True, False);
3065 Results.push_back(Tmp1);
3066 break;
3067 }
3068 case ISD::VAARG: {
3069 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3070 EVT VT = Node->getValueType(0);
3071 Tmp1 = Node->getOperand(0);
3072 Tmp2 = Node->getOperand(1);
3073 unsigned Align = Node->getConstantOperandVal(3);
3074
3075 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
3076 MachinePointerInfo(V),
3077 false, false, false, 0);
3078 SDValue VAList = VAListLoad;
3079
3080 if (Align > TLI.getMinStackArgumentAlignment()) {
3081 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
3082
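// Round VAList up to the required alignment:
//   VAList = (VAList + Align - 1) & ~(Align - 1)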
3083 VAList = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
3084 DAG.getConstant(Align - 1,
3085 VAList.getValueType()));
3086
3087 VAList = DAG.getNode(ISD::AND, dl, VAList.getValueType(), VAList,
3088 DAG.getConstant(-(int64_t)Align,
3089 VAList.getValueType()));
3090 }
3091
3092 // Increment the pointer, VAList, to the next vaarg
3093 Tmp3 = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
3094 DAG.getConstant(TLI.getDataLayout()->
3095 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
3096 VAList.getValueType()));
3097 // Store the incremented VAList to the legalized pointer
3098 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
3099 MachinePointerInfo(V), false, false, 0);
3100 // Load the actual argument out of the pointer VAList
3101 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
3102 false, false, false, 0));
3103 Results.push_back(Results[0].getValue(1));
3104 break;
3105 }
3106 case ISD::VACOPY: {
3107 // This defaults to loading a pointer from the input and storing it to the
3108 // output, returning the chain.
3109 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
3110 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
3111 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
3112 Node->getOperand(2), MachinePointerInfo(VS),
3113 false, false, false, 0);
3114 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
3115 MachinePointerInfo(VD), false, false, 0);
3116 Results.push_back(Tmp1);
3117 break;
3118 }
3119 case ISD::EXTRACT_VECTOR_ELT:
3120 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
3121 // This must be an access of the only element. Return it.
3122 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
3123 Node->getOperand(0));
3124 else
3125 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
3126 Results.push_back(Tmp1);
3127 break;
3128 case ISD::EXTRACT_SUBVECTOR:
3129 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
3130 break;
3131 case ISD::INSERT_SUBVECTOR:
3132 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
3133 break;
3134 case ISD::CONCAT_VECTORS: {
3135 Results.push_back(ExpandVectorBuildThroughStack(Node));
3136 break;
3137 }
3138 case ISD::SCALAR_TO_VECTOR:
3139 Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
3140 break;
3141 case ISD::INSERT_VECTOR_ELT:
3142 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
3143 Node->getOperand(1),
3144 Node->getOperand(2), dl));
3145 break;
3146 case ISD::VECTOR_SHUFFLE: {
3147 SmallVector<int, 32> NewMask;
3148 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
3149
3150 EVT VT = Node->getValueType(0);
3151 EVT EltVT = VT.getVectorElementType();
3152 SDValue Op0 = Node->getOperand(0);
3153 SDValue Op1 = Node->getOperand(1);
3154 if (!TLI.isTypeLegal(EltVT)) {
3155
3156 EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
3157
3158 // BUILD_VECTOR operands are allowed to be wider than the element type.
3159 // But if NewEltVT is smaller than EltVT the BUILD_VECTOR does not accept
3160 // it.
3161 if (NewEltVT.bitsLT(EltVT)) {
3162
3163 // Convert shuffle node.
3164 // If original node was v4i64 and the new EltVT is i32,
3165 // cast operands to v8i32 and re-build the mask.
3166
3167 // Calculate the new VT; its size should be equal to the original VT's.
3168 EVT NewVT =
3169 EVT::getVectorVT(*DAG.getContext(), NewEltVT,
3170 VT.getSizeInBits() / NewEltVT.getSizeInBits());
3171 assert(NewVT.bitsEq(VT));
3172
3173 // cast operands to new VT
3174 Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0);
3175 Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1);
3176
3177 // Convert the shuffle mask
3178 unsigned int factor =
3179 NewVT.getVectorNumElements()/VT.getVectorNumElements();
3180
3181 // EltVT gets smaller
3182 assert(factor > 0);
3183
3184 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
3185 if (Mask[i] < 0) {
3186 for (unsigned fi = 0; fi < factor; ++fi)
3187 NewMask.push_back(Mask[i]);
3188 }
3189 else {
3190 for (unsigned fi = 0; fi < factor; ++fi)
3191 NewMask.push_back(Mask[i]*factor+fi);
3192 }
3193 }
3194 Mask = NewMask;
3195 VT = NewVT;
3196 }
3197 EltVT = NewEltVT;
3198 }
3199 unsigned NumElems = VT.getVectorNumElements();
3200 SmallVector<SDValue, 16> Ops;
3201 for (unsigned i = 0; i != NumElems; ++i) {
3202 if (Mask[i] < 0) {
3203 Ops.push_back(DAG.getUNDEF(EltVT));
3204 continue;
3205 }
3206 unsigned Idx = Mask[i];
3207 if (Idx < NumElems)
3208 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
3209 Op0,
3210 DAG.getConstant(Idx, TLI.getVectorIdxTy())));
3211 else
3212 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
3213 Op1,
3214 DAG.getConstant(Idx - NumElems,
3215 TLI.getVectorIdxTy())));
3216 }
3217
3218 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
3219 // We may have changed the BUILD_VECTOR type. Cast it back to the Node type.
3220 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1);
3221 Results.push_back(Tmp1);
3222 break;
3223 }
3224 case ISD::EXTRACT_ELEMENT: {
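// EXTRACT_ELEMENT of an integer pair: element 1 is the high half (shift
// right by half the width, then truncate); element 0 is the low half
// (plain truncate).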
3225 EVT OpTy = Node->getOperand(0).getValueType();
3226 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
3227 // 1 -> Hi
3228 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
3229 DAG.getConstant(OpTy.getSizeInBits()/2,
3230 TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
3231 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
3232 } else {
3233 // 0 -> Lo
3234 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
3235 Node->getOperand(0));
3236 }
3237 Results.push_back(Tmp1);
3238 break;
3239 }
3240 case ISD::STACKSAVE:
3241 // Expand to CopyFromReg if the target set
3242 // StackPointerRegisterToSaveRestore.
3243 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
3244 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
3245 Node->getValueType(0)));
3246 Results.push_back(Results[0].getValue(1));
3247 } else {
3248 Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
3249 Results.push_back(Node->getOperand(0));
3250 }
3251 break;
3252 case ISD::STACKRESTORE:
3253 // Expand to CopyToReg if the target set
3254 // StackPointerRegisterToSaveRestore.
3255 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
3256 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
3257 Node->getOperand(1)));
3258 } else {
3259 Results.push_back(Node->getOperand(0));
3260 }
3261 break;
3262 case ISD::FCOPYSIGN:
3263 Results.push_back(ExpandFCOPYSIGN(Node));
3264 break;
3265 case ISD::FNEG:
3266 // Expand Y = FNEG(X) -> Y = SUB -0.0, X
3267 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
3268 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
3269 Node->getOperand(0));
3270 Results.push_back(Tmp1);
3271 break;
3272 case ISD::FABS: {
3273 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
3274 EVT VT = Node->getValueType(0);
3275 Tmp1 = Node->getOperand(0);
3276 Tmp2 = DAG.getConstantFP(0.0, VT);
3277 Tmp2 = DAG.getSetCC(dl, getSetCCResultType(Tmp1.getValueType()),
3278 Tmp1, Tmp2, ISD::SETUGT);
3279 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
3280 Tmp1 = DAG.getSelect(dl, VT, Tmp2, Tmp1, Tmp3);
3281 Results.push_back(Tmp1);
3282 break;
3283 }
3284 case ISD::FMINNUM:
3285 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMIN_F32, RTLIB::FMIN_F64,
3286 RTLIB::FMIN_F80, RTLIB::FMIN_F128,
3287 RTLIB::FMIN_PPCF128));
3288 break;
3289 case ISD::FMAXNUM:
3290 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMAX_F32, RTLIB::FMAX_F64,
3291 RTLIB::FMAX_F80, RTLIB::FMAX_F128,
3292 RTLIB::FMAX_PPCF128));
3293 break;
3294 case ISD::FSQRT:
3295 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
3296 RTLIB::SQRT_F80, RTLIB::SQRT_F128,
3297 RTLIB::SQRT_PPCF128));
3298 break;
3299 case ISD::FSIN:
3300 case ISD::FCOS: {
3301 EVT VT = Node->getValueType(0);
3302 bool isSIN = Node->getOpcode() == ISD::FSIN;
3303 // Turn fsin / fcos into an ISD::FSINCOS node if there is a pair of fsin /
3304 // fcos which share the same operand and both are used.
3305 if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) ||
3306 canCombineSinCosLibcall(Node, TLI, TM))
3307 && useSinCos(Node)) {
3308 SDVTList VTs = DAG.getVTList(VT, VT);
3309 Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0));
3310 if (!isSIN)
3311 Tmp1 = Tmp1.getValue(1);
3312 Results.push_back(Tmp1);
3313 } else if (isSIN) {
3314 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
3315 RTLIB::SIN_F80, RTLIB::SIN_F128,
3316 RTLIB::SIN_PPCF128));
3317 } else {
3318 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
3319 RTLIB::COS_F80, RTLIB::COS_F128,
3320 RTLIB::COS_PPCF128));
3321 }
3322 break;
3323 }
3324 case ISD::FSINCOS:
3325 // Expand into sincos libcall.
3326 ExpandSinCosLibCall(Node, Results);
3327 break;
3328 case ISD::FLOG:
3329 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
3330 RTLIB::LOG_F80, RTLIB::LOG_F128,
3331 RTLIB::LOG_PPCF128));
3332 break;
3333 case ISD::FLOG2:
3334 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
3335 RTLIB::LOG2_F80, RTLIB::LOG2_F128,
3336 RTLIB::LOG2_PPCF128));
3337 break;
3338 case ISD::FLOG10:
3339 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
3340 RTLIB::LOG10_F80, RTLIB::LOG10_F128,
3341 RTLIB::LOG10_PPCF128));
3342 break;
3343 case ISD::FEXP:
3344 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
3345 RTLIB::EXP_F80, RTLIB::EXP_F128,
3346 RTLIB::EXP_PPCF128));
3347 break;
3348 case ISD::FEXP2:
3349 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
3350 RTLIB::EXP2_F80, RTLIB::EXP2_F128,
3351 RTLIB::EXP2_PPCF128));
3352 break;
3353 case ISD::FTRUNC:
3354 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
3355 RTLIB::TRUNC_F80, RTLIB::TRUNC_F128,
3356 RTLIB::TRUNC_PPCF128));
3357 break;
3358 case ISD::FFLOOR:
3359 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
3360 RTLIB::FLOOR_F80, RTLIB::FLOOR_F128,
3361 RTLIB::FLOOR_PPCF128));
3362 break;
3363 case ISD::FCEIL:
3364 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
3365 RTLIB::CEIL_F80, RTLIB::CEIL_F128,
3366 RTLIB::CEIL_PPCF128));
3367 break;
3368 case ISD::FRINT:
3369 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
3370 RTLIB::RINT_F80, RTLIB::RINT_F128,
3371 RTLIB::RINT_PPCF128));
3372 break;
3373 case ISD::FNEARBYINT:
3374 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
3375 RTLIB::NEARBYINT_F64,
3376 RTLIB::NEARBYINT_F80,
3377 RTLIB::NEARBYINT_F128,
3378 RTLIB::NEARBYINT_PPCF128));
3379 break;
3380 case ISD::FROUND:
3381 Results.push_back(ExpandFPLibCall(Node, RTLIB::ROUND_F32,
3382 RTLIB::ROUND_F64,
3383 RTLIB::ROUND_F80,
3384 RTLIB::ROUND_F128,
3385 RTLIB::ROUND_PPCF128));
3386 break;
3387 case ISD::FPOWI:
3388 Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
3389 RTLIB::POWI_F80, RTLIB::POWI_F128,
3390 RTLIB::POWI_PPCF128));
3391 break;
3392 case ISD::FPOW:
3393 Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
3394 RTLIB::POW_F80, RTLIB::POW_F128,
3395 RTLIB::POW_PPCF128));
3396 break;
3397 case ISD::FDIV:
3398 Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
3399 RTLIB::DIV_F80, RTLIB::DIV_F128,
3400 RTLIB::DIV_PPCF128));
3401 break;
3402 case ISD::FREM:
3403 Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
3404 RTLIB::REM_F80, RTLIB::REM_F128,
3405 RTLIB::REM_PPCF128));
3406 break;
3407 case ISD::FMA:
3408 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
3409 RTLIB::FMA_F80, RTLIB::FMA_F128,
3410 RTLIB::FMA_PPCF128));
3411 break;
3412 case ISD::FMAD:
3413 llvm_unreachable("Illegal fmad should never be formed");
3414
3415 case ISD::FADD:
3416 Results.push_back(ExpandFPLibCall(Node, RTLIB::ADD_F32, RTLIB::ADD_F64,
3417 RTLIB::ADD_F80, RTLIB::ADD_F128,
3418 RTLIB::ADD_PPCF128));
3419 break;
3420 case ISD::FMUL:
3421 Results.push_back(ExpandFPLibCall(Node, RTLIB::MUL_F32, RTLIB::MUL_F64,
3422 RTLIB::MUL_F80, RTLIB::MUL_F128,
3423 RTLIB::MUL_PPCF128));
3424 break;
3425 case ISD::FP16_TO_FP: {
3426 if (Node->getValueType(0) == MVT::f32) {
3427 Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
3428 break;
3429 }
3430
3431 // We can extend to types bigger than f32 in two steps without changing the
3432 // result. Since "f16 -> f32" is much more commonly available, give CodeGen
3433 // the option of emitting that before resorting to a libcall.
3434 SDValue Res =
3435 DAG.getNode(ISD::FP16_TO_FP, dl, MVT::f32, Node->getOperand(0));
3436 Results.push_back(
3437 DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Res));
3438 break;
3439 }
3440 case ISD::FP_TO_FP16: {
3441 if (!TM.Options.UseSoftFloat && TM.Options.UnsafeFPMath) {
3442 SDValue Op = Node->getOperand(0);
3443 MVT SVT = Op.getSimpleValueType();
3444 if ((SVT == MVT::f64 || SVT == MVT::f80) &&
3445 TLI.isOperationLegalOrCustom(ISD::FP_TO_FP16, MVT::f32)) {
3446 // Under fastmath, we can expand this node into a fround followed by
3447 // a float-half conversion.
3448 SDValue FloatVal = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Op,
3449 DAG.getIntPtrConstant(0));
3450 Results.push_back(
3451 DAG.getNode(ISD::FP_TO_FP16, dl, MVT::i16, FloatVal));
3452 break;
3453 }
3454 }
3455
3456 RTLIB::Libcall LC =
3457 RTLIB::getFPROUND(Node->getOperand(0).getValueType(), MVT::f16);
3458 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to expand fp_to_fp16");
3459 Results.push_back(ExpandLibCall(LC, Node, false));
3460 break;
3461 }
3462 case ISD::ConstantFP: {
3463 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
3464 // Check to see if this FP immediate is already legal.
3465 // If this is a legal constant, turn it into a TargetConstantFP node.
3466 if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
3467 Results.push_back(ExpandConstantFP(CFP, true));
3468 break;
3469 }
3470 case ISD::FSUB: {
3471 EVT VT = Node->getValueType(0);
3472 if (TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
3473 TLI.isOperationLegalOrCustom(ISD::FNEG, VT)) {
3474 Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1));
3475 Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1);
3476 Results.push_back(Tmp1);
3477 } else {
3478 Results.push_back(ExpandFPLibCall(Node, RTLIB::SUB_F32, RTLIB::SUB_F64,
3479 RTLIB::SUB_F80, RTLIB::SUB_F128,
3480 RTLIB::SUB_PPCF128));
3481 }
3482 break;
3483 }
3484 case ISD::SUB: {
3485 EVT VT = Node->getValueType(0);
3486 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
3487 TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
3488 "Don't know how to expand this subtraction!");
3489 Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
3490 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
3491 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
3492 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
3493 break;
3494 }
3495 case ISD::UREM:
3496 case ISD::SREM: {
3497 EVT VT = Node->getValueType(0);
3498 bool isSigned = Node->getOpcode() == ISD::SREM;
3499 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
3500 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3501 Tmp2 = Node->getOperand(0);
3502 Tmp3 = Node->getOperand(1);
3503 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
3504 (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
3505 // If div is legal, it's better to do the normal expansion
3506 !TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) &&
3507 useDivRem(Node, isSigned, false))) {
3508 SDVTList VTs = DAG.getVTList(VT, VT);
3509 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
3510 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
3511 // X % Y -> X-X/Y*Y
3512 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
3513 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
3514 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
3515 } else if (isSigned)
3516 Tmp1 = ExpandIntLibCall(Node, true,
3517 RTLIB::SREM_I8,
3518 RTLIB::SREM_I16, RTLIB::SREM_I32,
3519 RTLIB::SREM_I64, RTLIB::SREM_I128);
3520 else
3521 Tmp1 = ExpandIntLibCall(Node, false,
3522 RTLIB::UREM_I8,
3523 RTLIB::UREM_I16, RTLIB::UREM_I32,
3524 RTLIB::UREM_I64, RTLIB::UREM_I128);
3525 Results.push_back(Tmp1);
3526 break;
3527 }
3528 case ISD::UDIV:
3529 case ISD::SDIV: {
3530 bool isSigned = Node->getOpcode() == ISD::SDIV;
3531 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3532 EVT VT = Node->getValueType(0);
3533 SDVTList VTs = DAG.getVTList(VT, VT);
3534 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
3535 (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
3536 useDivRem(Node, isSigned, true)))
3537 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
3538 Node->getOperand(1));
3539 else if (isSigned)
3540 Tmp1 = ExpandIntLibCall(Node, true,
3541 RTLIB::SDIV_I8,
3542 RTLIB::SDIV_I16, RTLIB::SDIV_I32,
3543 RTLIB::SDIV_I64, RTLIB::SDIV_I128);
3544 else
3545 Tmp1 = ExpandIntLibCall(Node, false,
3546 RTLIB::UDIV_I8,
3547 RTLIB::UDIV_I16, RTLIB::UDIV_I32,
3548 RTLIB::UDIV_I64, RTLIB::UDIV_I128);
3549 Results.push_back(Tmp1);
3550 break;
3551 }
3552 case ISD::MULHU:
3553 case ISD::MULHS: {
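// Only the high half of the product is needed; expand via the corresponding
// {S,U}MUL_LOHI node and take its second result.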
3554 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
3555 ISD::SMUL_LOHI;
3556 EVT VT = Node->getValueType(0);
3557 SDVTList VTs = DAG.getVTList(VT, VT);
3558 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
3559 "If this wasn't legal, it shouldn't have been created!");
3560 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
3561 Node->getOperand(1));
3562 Results.push_back(Tmp1.getValue(1));
3563 break;
3564 }
3565 case ISD::SDIVREM:
3566 case ISD::UDIVREM:
3567 // Expand into divrem libcall
3568 ExpandDivRemLibCall(Node, Results);
3569 break;
3570 case ISD::MUL: {
3571 EVT VT = Node->getValueType(0);
3572 SDVTList VTs = DAG.getVTList(VT, VT);
3573 // See if the multiply can be lowered using two-result operations.
3574 // We just need the low half of the multiply; try both the signed
3575 // and unsigned forms. If the target supports both SMUL_LOHI and
3576 // UMUL_LOHI, form a preference by checking which forms of plain
3577 // MULH it supports.
3578 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
3579 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
3580 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
3581 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
3582 unsigned OpToUse = 0;
3583 if (HasSMUL_LOHI && !HasMULHS) {
3584 OpToUse = ISD::SMUL_LOHI;
3585 } else if (HasUMUL_LOHI && !HasMULHU) {
3586 OpToUse = ISD::UMUL_LOHI;
3587 } else if (HasSMUL_LOHI) {
3588 OpToUse = ISD::SMUL_LOHI;
3589 } else if (HasUMUL_LOHI) {
3590 OpToUse = ISD::UMUL_LOHI;
3591 }
3592 if (OpToUse) {
3593 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
3594 Node->getOperand(1)));
3595 break;
3596 }
3597
3598 SDValue Lo, Hi;
3599 EVT HalfType = VT.getHalfSizedIntegerVT(*DAG.getContext());
3600 if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND, VT) &&
3601 TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND, VT) &&
3602 TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
3603 TLI.isOperationLegalOrCustom(ISD::OR, VT) &&
3604 TLI.expandMUL(Node, Lo, Hi, HalfType, DAG)) {
3605 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
3606 Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi);
3607 SDValue Shift = DAG.getConstant(HalfType.getSizeInBits(),
3608 TLI.getShiftAmountTy(HalfType));
3609 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
3610 Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi));
3611 break;
3612 }
3613
3614 Tmp1 = ExpandIntLibCall(Node, false,
3615 RTLIB::MUL_I8,
3616 RTLIB::MUL_I16, RTLIB::MUL_I32,
3617 RTLIB::MUL_I64, RTLIB::MUL_I128);
3618 Results.push_back(Tmp1);
3619 break;
3620 }
3621 case ISD::SADDO:
3622 case ISD::SSUBO: {
3623 SDValue LHS = Node->getOperand(0);
3624 SDValue RHS = Node->getOperand(1);
3625 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
3626 ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
3627 LHS, RHS);
3628 Results.push_back(Sum);
3629 EVT ResultType = Node->getValueType(1);
3630 EVT OType = getSetCCResultType(Node->getValueType(0));
3631
3632 SDValue Zero = DAG.getConstant(0, LHS.getValueType());
3633
3634 // LHSSign -> LHS >= 0
3635 // RHSSign -> RHS >= 0
3636 // SumSign -> Sum >= 0
3637 //
3638 // Add:
3639 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
3640 // Sub:
3641 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
3642 //
3643 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
3644 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
3645 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
3646 Node->getOpcode() == ISD::SADDO ?
3647 ISD::SETEQ : ISD::SETNE);
3648
3649 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
3650 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
3651
3652 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
3653 Results.push_back(DAG.getBoolExtOrTrunc(Cmp, dl, ResultType, ResultType));
3654 break;
3655 }
3656 case ISD::UADDO:
3657 case ISD::USUBO: {
3658 SDValue LHS = Node->getOperand(0);
3659 SDValue RHS = Node->getOperand(1);
3660 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
3661 ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
3662 LHS, RHS);
3663 Results.push_back(Sum);
3664
3665 EVT ResultType = Node->getValueType(1);
3666 EVT SetCCType = getSetCCResultType(Node->getValueType(0));
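// Unsigned overflow occurred iff the result wrapped around:
// Sum < LHS for UADDO, Sum > LHS for USUBO.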
3667 ISD::CondCode CC
3668 = Node->getOpcode() == ISD::UADDO ? ISD::SETULT : ISD::SETUGT;
3669 SDValue SetCC = DAG.getSetCC(dl, SetCCType, Sum, LHS, CC);
3670
3671 Results.push_back(DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType));
3672 break;
3673 }
3674 case ISD::UMULO:
3675 case ISD::SMULO: {
3676 EVT VT = Node->getValueType(0);
3677 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
3678 SDValue LHS = Node->getOperand(0);
3679 SDValue RHS = Node->getOperand(1);
3680 SDValue BottomHalf;
3681 SDValue TopHalf;
3682 static const unsigned Ops[2][3] =
3683 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
3684 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
3685 bool isSigned = Node->getOpcode() == ISD::SMULO;
3686 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
3687 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
3688 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
3689 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
3690 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
3691 RHS);
3692 TopHalf = BottomHalf.getValue(1);
3693 } else if (TLI.isTypeLegal(WideVT)) {
3694 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
3695 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
3696 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
3697 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
3698 DAG.getIntPtrConstant(0));
3699 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
3700 DAG.getIntPtrConstant(1));
3701 } else {
3702 // We can fall back to a libcall with an illegal type for the MUL if we
3703 // have a libcall big enough.
3704 // Also, we can fall back to a division in some cases, but that's a big
3705 // performance hit in the general case.
3706 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
3707 if (WideVT == MVT::i16)
3708 LC = RTLIB::MUL_I16;
3709 else if (WideVT == MVT::i32)
3710 LC = RTLIB::MUL_I32;
3711 else if (WideVT == MVT::i64)
3712 LC = RTLIB::MUL_I64;
3713 else if (WideVT == MVT::i128)
3714 LC = RTLIB::MUL_I128;
3715 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
3716
3717 // The high part is obtained by SRA'ing the low part by all but one of
3718 // its bits, i.e. it is the sign-extension of the low part.
3719 unsigned LoSize = VT.getSizeInBits();
3720 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
3721 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
3722 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
3723 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
3724
3725 // Here we're passing the 2 arguments explicitly as 4 arguments that are
3726 // pre-lowered to the correct types. This all depends upon WideVT not
3727 // being a legal type for the architecture, and thus having to be split
3728 // into two arguments.
3729 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
3730 SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
3731 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
3732 DAG.getIntPtrConstant(0));
3733 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
3734 DAG.getIntPtrConstant(1));
3735 // Ret is a node with an illegal type. Because such things are not
3736 // generally permitted during this phase of legalization, make sure the
3737 // node has no more uses. The above EXTRACT_ELEMENT nodes should have been
3738 // folded.
3739 assert(Ret->use_empty() &&
3740 "Unexpected uses of illegally type from expanded lib call.");
3741 }
3742
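// Overflow occurred iff the high half of the product is not the
// sign-extension of the low half (signed) or is nonzero (unsigned).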
3743 if (isSigned) {
3744 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
3745 TLI.getShiftAmountTy(BottomHalf.getValueType()));
3746 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
3747 TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf, Tmp1,
3748 ISD::SETNE);
3749 } else {
3750 TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf,
3751 DAG.getConstant(0, VT), ISD::SETNE);
3752 }
3753 Results.push_back(BottomHalf);
3754 Results.push_back(TopHalf);
3755 break;
3756 }
3757 case ISD::BUILD_PAIR: {
3758 EVT PairTy = Node->getValueType(0);
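// Combine the two halves: zero-extend the low half, any-extend the high
// half and shift it into position, then OR them together.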
3759 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
3760 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
3761 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
3762 DAG.getConstant(PairTy.getSizeInBits()/2,
3763 TLI.getShiftAmountTy(PairTy)));
3764 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
3765 break;
3766 }
3767 case ISD::SELECT:
3768 Tmp1 = Node->getOperand(0);
3769 Tmp2 = Node->getOperand(1);
3770 Tmp3 = Node->getOperand(2);
3771 if (Tmp1.getOpcode() == ISD::SETCC) {
3772 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
3773 Tmp2, Tmp3,
3774 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
3775 } else {
3776 Tmp1 = DAG.getSelectCC(dl, Tmp1,
3777 DAG.getConstant(0, Tmp1.getValueType()),
3778 Tmp2, Tmp3, ISD::SETNE);
3779 }
3780 Results.push_back(Tmp1);
3781 break;
3782 case ISD::BR_JT: {
3783 SDValue Chain = Node->getOperand(0);
3784 SDValue Table = Node->getOperand(1);
3785 SDValue Index = Node->getOperand(2);
3786
3787 EVT PTy = TLI.getPointerTy();
3788
3789 const DataLayout &TD = *TLI.getDataLayout();
3790 unsigned EntrySize =
3791 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
3792
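// Compute the address of the jump-table entry: scale the index by the
// entry size and add the table base, then load the target address.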
3793 Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(),
3794 Index, DAG.getConstant(EntrySize, Index.getValueType()));
3795 SDValue Addr = DAG.getNode(ISD::ADD, dl, Index.getValueType(),
3796 Index, Table);
3797
3798 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
3799 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
3800 MachinePointerInfo::getJumpTable(), MemVT,
3801 false, false, false, 0);
3802 Addr = LD;
3803 if (TM.getRelocationModel() == Reloc::PIC_) {
3804 // For PIC, the sequence is:
3805 // BRIND(load(Jumptable + index) + RelocBase)
3806 // RelocBase can be JumpTable, GOT or some sort of global base.
3807 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
3808 TLI.getPICJumpTableRelocBase(Table, DAG));
3809 }
3810 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
3811 Results.push_back(Tmp1);
3812 break;
3813 }
3814 case ISD::BRCOND:
3815 // Expand brcond's setcc into its constituent parts and create a BR_CC
3816 // Node.
3817 Tmp1 = Node->getOperand(0);
3818 Tmp2 = Node->getOperand(1);
3819 if (Tmp2.getOpcode() == ISD::SETCC) {
3820 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
3821 Tmp1, Tmp2.getOperand(2),
3822 Tmp2.getOperand(0), Tmp2.getOperand(1),
3823 Node->getOperand(2));
3824 } else {
3825 // We test only the i1 bit. Skip the AND if UNDEF.
3826 Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 :
3827 DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
3828 DAG.getConstant(1, Tmp2.getValueType()));
3829 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
3830 DAG.getCondCode(ISD::SETNE), Tmp3,
3831 DAG.getConstant(0, Tmp3.getValueType()),
3832 Node->getOperand(2));
3833 }
3834 Results.push_back(Tmp1);
3835 break;
3836 case ISD::SETCC: {
3837 Tmp1 = Node->getOperand(0);
3838 Tmp2 = Node->getOperand(1);
3839 Tmp3 = Node->getOperand(2);
3840 bool Legalized = LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2,
3841 Tmp3, NeedInvert, dl);
3842
3843 if (Legalized) {
3844 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
3845 // condition code, create a new SETCC node.
3846 if (Tmp3.getNode())
3847 Tmp1 = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
3848 Tmp1, Tmp2, Tmp3);
3849
3850 // If we expanded the SETCC by inverting the condition code, then wrap
3851 // the existing SETCC in a NOT to restore the intended condition.
3852 if (NeedInvert)
3853 Tmp1 = DAG.getLogicalNOT(dl, Tmp1, Tmp1->getValueType(0));
3854
3855 Results.push_back(Tmp1);
3856 break;
3857 }
3858
3859 // Otherwise, SETCC for the given comparison type must be completely
3860 // illegal; expand it into a SELECT_CC.
3861 EVT VT = Node->getValueType(0);
3862 int TrueValue;
3863 switch (TLI.getBooleanContents(Tmp1->getValueType(0))) {
3864 case TargetLowering::ZeroOrOneBooleanContent:
3865 case TargetLowering::UndefinedBooleanContent:
3866 TrueValue = 1;
3867 break;
3868 case TargetLowering::ZeroOrNegativeOneBooleanContent:
3869 TrueValue = -1;
3870 break;
3871 }
3872 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
3873 DAG.getConstant(TrueValue, VT), DAG.getConstant(0, VT),
3874 Tmp3);
3875 Results.push_back(Tmp1);
3876 break;
3877 }
3878 case ISD::SELECT_CC: {
3879 Tmp1 = Node->getOperand(0); // LHS
3880 Tmp2 = Node->getOperand(1); // RHS
3881 Tmp3 = Node->getOperand(2); // True
3882 Tmp4 = Node->getOperand(3); // False
3883 EVT VT = Node->getValueType(0);
3884 SDValue CC = Node->getOperand(4);
3885 ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();
3886
3887 if (TLI.isCondCodeLegal(CCOp, Tmp1.getSimpleValueType())) {
3888 // If the condition code is legal, then we need to expand this
3889 // node using SETCC and SELECT.
3890 EVT CmpVT = Tmp1.getValueType();
3891 assert(!TLI.isOperationExpand(ISD::SELECT, VT) &&
3892 "Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be "
3893 "expanded.");
3894 EVT CCVT = TLI.getSetCCResultType(*DAG.getContext(), CmpVT);
3895 SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC);
3896 Results.push_back(DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4));
3897 break;
3898 }
3899
3900 // SELECT_CC is legal, so the condition code must not be.
3901 bool Legalized = false;
3902 // Try to legalize by inverting the condition. This is for targets that
3903 // might support an ordered version of a condition, but not the unordered
3904 // version (or vice versa).
3905 ISD::CondCode InvCC = ISD::getSetCCInverse(CCOp,
3906 Tmp1.getValueType().isInteger());
3907 if (TLI.isCondCodeLegal(InvCC, Tmp1.getSimpleValueType())) {
3908 // Use the new condition code and swap true and false
3909 Legalized = true;
3910 Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC);
3911 } else {
3912 // If the inverse is not legal, then try to swap the arguments using
3913 // the inverse condition code.
3914 ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InvCC);
3915 if (TLI.isCondCodeLegal(SwapInvCC, Tmp1.getSimpleValueType())) {
3916 // The swapped inverse condition is legal, so swap true and false,
3917 // lhs and rhs.
3918 Legalized = true;
3919 Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC);
3920 }
3921 }
3922
3923 if (!Legalized) {
3924 Legalized = LegalizeSetCCCondCode(
3925 getSetCCResultType(Tmp1.getValueType()), Tmp1, Tmp2, CC, NeedInvert,
3926 dl);
3927
3928 assert(Legalized && "Can't legalize SELECT_CC with legal condition!");
3929
3930 // If we expanded the SETCC by inverting the condition code, then swap
3931 // the True/False operands to match.
3932 if (NeedInvert)
3933 std::swap(Tmp3, Tmp4);
3934
3935 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
3936 // condition code, create a new SELECT_CC node.
3937 if (CC.getNode()) {
3938 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0),
3939 Tmp1, Tmp2, Tmp3, Tmp4, CC);
3940 } else {
3941 Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
3942 CC = DAG.getCondCode(ISD::SETNE);
3943 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1,
3944 Tmp2, Tmp3, Tmp4, CC);
3945 }
3946 }
3947 Results.push_back(Tmp1);
3948 break;
3949 }
3950 case ISD::BR_CC: {
3951 Tmp1 = Node->getOperand(0); // Chain
3952 Tmp2 = Node->getOperand(2); // LHS
3953 Tmp3 = Node->getOperand(3); // RHS
3954 Tmp4 = Node->getOperand(1); // CC
3955
3956 bool Legalized = LegalizeSetCCCondCode(getSetCCResultType(
3957 Tmp2.getValueType()), Tmp2, Tmp3, Tmp4, NeedInvert, dl);
3958 (void)Legalized;
3959 assert(Legalized && "Can't legalize BR_CC with legal condition!");
3960
3961 // If we expanded the SETCC by inverting the condition code, then wrap
3962 // the existing SETCC in a NOT to restore the intended condition.
3963 if (NeedInvert)
3964 Tmp4 = DAG.getNOT(dl, Tmp4, Tmp4->getValueType(0));
3965
3966 // If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC
3967 // node.
3968 if (Tmp4.getNode()) {
3969 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1,
3970 Tmp4, Tmp2, Tmp3, Node->getOperand(4));
3971 } else {
3972 Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
3973 Tmp4 = DAG.getCondCode(ISD::SETNE);
3974 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4,
3975 Tmp2, Tmp3, Node->getOperand(4));
3976 }
3977 Results.push_back(Tmp1);
3978 break;
3979 }
3980 case ISD::BUILD_VECTOR:
3981 Results.push_back(ExpandBUILD_VECTOR(Node));
3982 break;
3983 case ISD::SRA:
3984 case ISD::SRL:
3985 case ISD::SHL: {
3986 // Scalarize vector SRA/SRL/SHL.
3987 EVT VT = Node->getValueType(0);
3988 assert(VT.isVector() && "Unable to legalize non-vector shift");
3989 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal");
3990 unsigned NumElem = VT.getVectorNumElements();
3991
3992 SmallVector<SDValue, 8> Scalars;
3993 for (unsigned Idx = 0; Idx < NumElem; Idx++) {
3994 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
3995 VT.getScalarType(),
3996 Node->getOperand(0), DAG.getConstant(Idx,
3997 TLI.getVectorIdxTy()));
3998 SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
3999 VT.getScalarType(),
4000 Node->getOperand(1), DAG.getConstant(Idx,
4001 TLI.getVectorIdxTy()));
4002 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
4003 VT.getScalarType(), Ex, Sh));
4004 }
4005 SDValue Result =
4006 DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Scalars);
4007 ReplaceNode(SDValue(Node, 0), Result);
4008 break;
4009 }
4010 case ISD::GLOBAL_OFFSET_TABLE:
4011 case ISD::GlobalAddress:
4012 case ISD::GlobalTLSAddress:
4013 case ISD::ExternalSymbol:
4014 case ISD::ConstantPool:
4015 case ISD::JumpTable:
4016 case ISD::INTRINSIC_W_CHAIN:
4017 case ISD::INTRINSIC_WO_CHAIN:
4018 case ISD::INTRINSIC_VOID:
4019 // FIXME: Custom lowering for these operations shouldn't return null!
4020 break;
4021 }
4022
4023 // Replace the original node with the legalized result.
4024 if (!Results.empty())
4025 ReplaceNode(Node, Results.data());
4026 }
4027
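/// Promote the specified operation to a larger type, performing it there and
/// converting operands and results between the original and promoted types.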
4028 void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
4029 SmallVector<SDValue, 8> Results;
4030 MVT OVT = Node->getSimpleValueType(0);
4031 if (Node->getOpcode() == ISD::UINT_TO_FP ||
4032 Node->getOpcode() == ISD::SINT_TO_FP ||
4033 Node->getOpcode() == ISD::SETCC) {
4034 OVT = Node->getOperand(0).getSimpleValueType();
4035 }
4036 if (Node->getOpcode() == ISD::BR_CC)
4037 OVT = Node->getOperand(2).getSimpleValueType();
4038 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
4039 SDLoc dl(Node);
4040 SDValue Tmp1, Tmp2, Tmp3;
4041 switch (Node->getOpcode()) {
4042 case ISD::CTTZ:
4043 case ISD::CTTZ_ZERO_UNDEF:
4044 case ISD::CTLZ:
4045 case ISD::CTLZ_ZERO_UNDEF:
4046 case ISD::CTPOP:
4047 // Zero extend the argument.
4048 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
4049 // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is
4050 // already the correct result.
4051 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
4052 if (Node->getOpcode() == ISD::CTTZ) {
4053 // FIXME: This should set a bit in the zero extended value instead.
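// The zero-extended value has no set bits above OVT, so a wide CTTZ result
// equal to NVT's bit width means the original value was zero; map that case
// back to OVT's bit width.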
4054 Tmp2 = DAG.getSetCC(dl, getSetCCResultType(NVT),
4055 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
4056 ISD::SETEQ);
4057 Tmp1 = DAG.getSelect(dl, NVT, Tmp2,
4058 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
4059 } else if (Node->getOpcode() == ISD::CTLZ ||
4060 Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
4061 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
4062 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
4063 DAG.getConstant(NVT.getSizeInBits() -
4064 OVT.getSizeInBits(), NVT));
4065 }
4066 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
4067 break;
4068 case ISD::BSWAP: {
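// Zero-extend, byte-swap in the wider type, then shift the swapped bytes
// down so they land in the low OVT bits.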
4069 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
4070 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
4071 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
4072 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
4073 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
4074 Results.push_back(Tmp1);
4075 break;
4076 }
4077 case ISD::FP_TO_UINT:
4078 case ISD::FP_TO_SINT:
4079 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
4080 Node->getOpcode() == ISD::FP_TO_SINT, dl);
4081 Results.push_back(Tmp1);
4082 break;
4083 case ISD::UINT_TO_FP:
4084 case ISD::SINT_TO_FP:
4085 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
4086 Node->getOpcode() == ISD::SINT_TO_FP, dl);
4087 Results.push_back(Tmp1);
4088 break;
4089 case ISD::VAARG: {
4090 SDValue Chain = Node->getOperand(0); // Get the chain.
4091 SDValue Ptr = Node->getOperand(1); // Get the pointer.
4092
4093 unsigned TruncOp;
4094 if (OVT.isVector()) {
4095 TruncOp = ISD::BITCAST;
4096 } else {
4097 assert(OVT.isInteger()
4098 && "VAARG promotion is supported only for vectors or integer types");
4099 TruncOp = ISD::TRUNCATE;
4100 }
4101
4102 // Perform the larger operation, then convert back
4103 Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2),
4104 Node->getConstantOperandVal(3));
4105 Chain = Tmp1.getValue(1);
4106
4107 Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1);
4108
4109 // Modified the chain result - switch anything that used the old chain to
4110 // use the new one.
4111 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2);
4112 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
4113 if (UpdatedNodes) {
4114 UpdatedNodes->insert(Tmp2.getNode());
4115 UpdatedNodes->insert(Chain.getNode());
4116 }
4117 ReplacedNode(Node);
4118 break;
4119 }
4120 case ISD::AND:
4121 case ISD::OR:
4122 case ISD::XOR: {
4123 unsigned ExtOp, TruncOp;
4124 if (OVT.isVector()) {
4125 ExtOp = ISD::BITCAST;
4126 TruncOp = ISD::BITCAST;
4127 } else {
4128 assert(OVT.isInteger() && "Cannot promote logic operation");
4129 ExtOp = ISD::ANY_EXTEND;
4130 TruncOp = ISD::TRUNCATE;
4131 }
4132 // Promote each of the values to the new type.
4133 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
4134 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
4135 // Perform the larger operation, then convert back
4136 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
4137 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
4138 break;
4139 }
4140 case ISD::SELECT: {
4141 unsigned ExtOp, TruncOp;
4142 if (Node->getValueType(0).isVector() ||
4143 Node->getValueType(0).getSizeInBits() == NVT.getSizeInBits()) {
4144 ExtOp = ISD::BITCAST;
4145 TruncOp = ISD::BITCAST;
4146 } else if (Node->getValueType(0).isInteger()) {
4147 ExtOp = ISD::ANY_EXTEND;
4148 TruncOp = ISD::TRUNCATE;
4149 } else {
4150 ExtOp = ISD::FP_EXTEND;
4151 TruncOp = ISD::FP_ROUND;
4152 }
4153 Tmp1 = Node->getOperand(0);
4154 // Promote each of the values to the new type.
4155 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
4156 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
4157 // Perform the larger operation, then round down.
4158 Tmp1 = DAG.getSelect(dl, NVT, Tmp1, Tmp2, Tmp3);
4159 if (TruncOp != ISD::FP_ROUND)
4160 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
4161 else
4162 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
4163 DAG.getIntPtrConstant(0));
4164 Results.push_back(Tmp1);
4165 break;
4166 }
4167 case ISD::VECTOR_SHUFFLE: {
4168 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
4169
4170 // Cast the two input vectors.
4171 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
4172 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));
4173
4174 // Convert the shuffle mask to the right # elements.
4175 Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
4176 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
4177 Results.push_back(Tmp1);
4178 break;
4179 }
4180 case ISD::SETCC: {
4181 unsigned ExtOp = ISD::FP_EXTEND;
4182 if (NVT.isInteger()) {
4183 ISD::CondCode CCCode =
4184 cast<CondCodeSDNode>(Node->getOperand(2))->get();
4185 ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4186 }
4187 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
4188 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
4189 Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
4190 Tmp1, Tmp2, Node->getOperand(2)));
4191 break;
4192 }
4193 case ISD::BR_CC: {
4194 unsigned ExtOp = ISD::FP_EXTEND;
4195 if (NVT.isInteger()) {
4196 ISD::CondCode CCCode =
4197 cast<CondCodeSDNode>(Node->getOperand(1))->get();
4198 ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4199 }
4200 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
4201 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(3));
4202 Results.push_back(DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0),
4203 Node->getOperand(0), Node->getOperand(1),
4204 Tmp1, Tmp2, Node->getOperand(4)));
4205 break;
4206 }
4207 case ISD::FADD:
4208 case ISD::FSUB:
4209 case ISD::FMUL:
4210 case ISD::FDIV:
4211 case ISD::FREM:
4212 case ISD::FMINNUM:
4213 case ISD::FMAXNUM:
4214 case ISD::FCOPYSIGN:
4215 case ISD::FPOW: {
4216 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
4217 Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
4218 Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
4219 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
4220 Tmp3, DAG.getIntPtrConstant(0)));
4221 break;
4222 }
4223 case ISD::FMA: {
4224 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
4225 Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
4226 Tmp3 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(2));
4227 Results.push_back(
4228 DAG.getNode(ISD::FP_ROUND, dl, OVT,
4229 DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2, Tmp3),
4230 DAG.getIntPtrConstant(0)));
4231 break;
4232 }
4233 case ISD::FPOWI: {
4234 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
4235 Tmp2 = Node->getOperand(1);
4236 Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
4237 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
4238 Tmp3, DAG.getIntPtrConstant(0)));
4239 break;
4240 }
4241 case ISD::FFLOOR:
4242 case ISD::FCEIL:
4243 case ISD::FRINT:
4244 case ISD::FNEARBYINT:
4245 case ISD::FROUND:
4246 case ISD::FTRUNC:
4247 case ISD::FNEG:
4248 case ISD::FSQRT:
4249 case ISD::FSIN:
4250 case ISD::FCOS:
4251 case ISD::FLOG:
4252 case ISD::FLOG2:
4253 case ISD::FLOG10:
4254 case ISD::FABS:
4255 case ISD::FEXP:
4256 case ISD::FEXP2: {
4257 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
4258 Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
4259 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
4260 Tmp2, DAG.getIntPtrConstant(0)));
4261 break;
4262 }
4263 }
4264
4265 // Replace the original node with the legalized result.
4266 if (!Results.empty())
4267 ReplaceNode(Node, Results.data());
4268 }
4269
4270 /// This is the entry point for the file.
4271 void SelectionDAG::Legalize() {
4272 AssignTopologicalOrder();
4273
4274 SmallPtrSet<SDNode *, 16> LegalizedNodes;
4275 SelectionDAGLegalize Legalizer(*this, LegalizedNodes);
4276
4277 // Visit all the nodes. We start in topological order, so that we see
4278 // nodes with their original operands intact. Legalization can produce
4279 // new nodes which may themselves need to be legalized. Iterate until all
4280 // nodes have been legalized.
4281 for (;;) {
4282 bool AnyLegalized = false;
4283 for (auto NI = allnodes_end(); NI != allnodes_begin();) {
4284 --NI;
4285
4286 SDNode *N = NI;
4287 if (N->use_empty() && N != getRoot().getNode()) {
4288 ++NI;
4289 DeleteNode(N);
4290 continue;
4291 }
4292
4293 if (LegalizedNodes.insert(N).second) {
4294 AnyLegalized = true;
4295 Legalizer.LegalizeOp(N);
4296
4297 if (N->use_empty() && N != getRoot().getNode()) {
4298 ++NI;
4299 DeleteNode(N);
4300 }
4301 }
4302 }
4303 if (!AnyLegalized)
4304 break;
4305
4306 }
4307
4308 // Remove dead nodes now.
4309 RemoveDeadNodes();
4310 }
4311
4312 bool SelectionDAG::LegalizeOp(SDNode *N,
4313 SmallSetVector<SDNode *, 16> &UpdatedNodes) {
4314 SmallPtrSet<SDNode *, 16> LegalizedNodes;
4315 SelectionDAGLegalize Legalizer(*this, LegalizedNodes, &UpdatedNodes);
4316
4317 // Directly insert the node in question, and legalize it. This will recurse
4318 // as needed through operands.
4319 LegalizedNodes.insert(N);
4320 Legalizer.LegalizeOp(N);
4321
4322 return LegalizedNodes.count(N);
4323 }
4324