1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "HexagonISelLowering.h"
16 #include "HexagonMachineFunctionInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "HexagonTargetMachine.h"
19 #include "HexagonTargetObjectFile.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/ValueTypes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 
40 using namespace llvm;
41 
42 #define DEBUG_TYPE "hexagon-lowering"
43 
44 static cl::opt<bool>
45 EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
46                cl::desc("Control jump table emission on Hexagon target"));
47 
48 namespace {
49 class HexagonCCState : public CCState {
50   int NumNamedVarArgParams;
51 
52 public:
53   HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
54                  SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
55                  int NumNamedVarArgParams)
56       : CCState(CC, isVarArg, MF, locs, C),
57         NumNamedVarArgParams(NumNamedVarArgParams) {}
58 
59   int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
60 };
61 }
62 
63 // Implement calling convention for Hexagon.
64 static bool
65 CC_Hexagon(unsigned ValNo, MVT ValVT,
66            MVT LocVT, CCValAssign::LocInfo LocInfo,
67            ISD::ArgFlagsTy ArgFlags, CCState &State);
68 
69 static bool
70 CC_Hexagon32(unsigned ValNo, MVT ValVT,
71              MVT LocVT, CCValAssign::LocInfo LocInfo,
72              ISD::ArgFlagsTy ArgFlags, CCState &State);
73 
74 static bool
75 CC_Hexagon64(unsigned ValNo, MVT ValVT,
76              MVT LocVT, CCValAssign::LocInfo LocInfo,
77              ISD::ArgFlagsTy ArgFlags, CCState &State);
78 
79 static bool
80 RetCC_Hexagon(unsigned ValNo, MVT ValVT,
81               MVT LocVT, CCValAssign::LocInfo LocInfo,
82               ISD::ArgFlagsTy ArgFlags, CCState &State);
83 
84 static bool
85 RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
86                 MVT LocVT, CCValAssign::LocInfo LocInfo,
87                 ISD::ArgFlagsTy ArgFlags, CCState &State);
88 
89 static bool
90 RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
91                 MVT LocVT, CCValAssign::LocInfo LocInfo,
92                 ISD::ArgFlagsTy ArgFlags, CCState &State);
93 
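// Illustrative note (not part of the convention tables themselves): for a
// variadic call such as printf("%d\n", X), only the format string is a named
// parameter, so it is assigned by CC_Hexagon below (typically to R0), while
// the unnamed argument X falls through to the stack-allocation paths in
// CC_Hexagon_VarArg.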
94 static bool
95 CC_Hexagon_VarArg(unsigned ValNo, MVT ValVT,
96             MVT LocVT, CCValAssign::LocInfo LocInfo,
97             ISD::ArgFlagsTy ArgFlags, CCState &State) {
98   HexagonCCState &HState = static_cast<HexagonCCState &>(State);
99 
100   // NumNamedVarArgParams cannot be zero for a vararg function.
101   assert((HState.getNumNamedVarArgParams() > 0) &&
102          "NumNamedVarArgParams must be positive for a vararg function");
103 
104   if ((int)ValNo < HState.getNumNamedVarArgParams()) {
105     // Deal with named arguments.
106     return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
107   }
108 
109   // Deal with unnamed arguments.
110   unsigned ofst;
111   if (ArgFlags.isByVal()) {
112     // If pass-by-value, the size allocated on the stack is determined
113     // by ArgFlags.getByValSize(), not by the size of LocVT.
114     assert ((ArgFlags.getByValSize() > 8) &&
115             "ByValSize must be bigger than 8 bytes");
116     ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
117     State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
118     return false;
119   }
120   if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
121     LocVT = MVT::i32;
122     ValVT = MVT::i32;
123     if (ArgFlags.isSExt())
124       LocInfo = CCValAssign::SExt;
125     else if (ArgFlags.isZExt())
126       LocInfo = CCValAssign::ZExt;
127     else
128       LocInfo = CCValAssign::AExt;
129   }
130   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
131     ofst = State.AllocateStack(4, 4);
132     State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
133     return false;
134   }
135   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
136     ofst = State.AllocateStack(8, 8);
137     State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
138     return false;
139   }
140   llvm_unreachable("Unsupported argument type in CC_Hexagon_VarArg");
141 }
142 
143 
144 static bool
145 CC_Hexagon(unsigned ValNo, MVT ValVT,
146             MVT LocVT, CCValAssign::LocInfo LocInfo,
147             ISD::ArgFlagsTy ArgFlags, CCState &State) {
148 
149   if (ArgFlags.isByVal()) {
150     // Passed on stack.
151     assert ((ArgFlags.getByValSize() > 8) &&
152             "ByValSize must be bigger than 8 bytes");
153     unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
154     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
155     return false;
156   }
157 
158   if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
159     LocVT = MVT::i32;
160     ValVT = MVT::i32;
161     if (ArgFlags.isSExt())
162       LocInfo = CCValAssign::SExt;
163     else if (ArgFlags.isZExt())
164       LocInfo = CCValAssign::ZExt;
165     else
166       LocInfo = CCValAssign::AExt;
167   } else if (LocVT == MVT::v4i8 || LocVT == MVT::v2i16) {
168     LocVT = MVT::i32;
169     LocInfo = CCValAssign::BCvt;
170   } else if (LocVT == MVT::v8i8 || LocVT == MVT::v4i16 || LocVT == MVT::v2i32) {
171     LocVT = MVT::i64;
172     LocInfo = CCValAssign::BCvt;
173   }
174 
175   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
176     if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
177       return false;
178   }
179 
180   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
181     if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
182       return false;
183   }
184 
185   return true;  // CC didn't match.
186 }
187 
188 
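// A sketch of the argument-register assignment modeled by the two helpers
// below: 32-bit values are passed in R0-R5 and 64-bit values in the register
// pairs D0-D2 (D1/D2 are allocated with R1/R3 as shadow registers); anything
// that does not get a register is placed on the stack with 4-byte or 8-byte
// alignment, respectively.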
189 static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
190                          MVT LocVT, CCValAssign::LocInfo LocInfo,
191                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
192 
193   static const MCPhysReg RegList[] = {
194     Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
195     Hexagon::R5
196   };
197   if (unsigned Reg = State.AllocateReg(RegList)) {
198     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
199     return false;
200   }
201 
202   unsigned Offset = State.AllocateStack(4, 4);
203   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
204   return false;
205 }
206 
207 static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
208                          MVT LocVT, CCValAssign::LocInfo LocInfo,
209                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
210 
211   if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
212     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
213     return false;
214   }
215 
216   static const MCPhysReg RegList1[] = {
217     Hexagon::D1, Hexagon::D2
218   };
219   static const MCPhysReg RegList2[] = {
220     Hexagon::R1, Hexagon::R3
221   };
222   if (unsigned Reg = State.AllocateReg(RegList1, RegList2)) {
223     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
224     return false;
225   }
226 
227   unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
228   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
229   return false;
230 }
231 
232 static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
233                           MVT LocVT, CCValAssign::LocInfo LocInfo,
234                           ISD::ArgFlagsTy ArgFlags, CCState &State) {
235 
236 
237   if (LocVT == MVT::i1 ||
238       LocVT == MVT::i8 ||
239       LocVT == MVT::i16) {
240     LocVT = MVT::i32;
241     ValVT = MVT::i32;
242     if (ArgFlags.isSExt())
243       LocInfo = CCValAssign::SExt;
244     else if (ArgFlags.isZExt())
245       LocInfo = CCValAssign::ZExt;
246     else
247       LocInfo = CCValAssign::AExt;
248   } else if (LocVT == MVT::v4i8 || LocVT == MVT::v2i16) {
249     LocVT = MVT::i32;
250     LocInfo = CCValAssign::BCvt;
251   } else if (LocVT == MVT::v8i8 || LocVT == MVT::v4i16 || LocVT == MVT::v2i32) {
252     LocVT = MVT::i64;
253     LocInfo = CCValAssign::BCvt;
254   }
255 
256   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
257     if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
258       return false;
259   }
260 
261   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
262     if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
263       return false;
264   }
265 
266   return true;  // CC didn't match.
267 }
268 
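// Return values mirror the argument convention: a 32-bit result is expected
// in R0 and a 64-bit result in D0 (the R1:R0 pair); the stack-slot fallback
// in each helper below is defensive and should not normally be reached for a
// single return value.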
269 static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
270                             MVT LocVT, CCValAssign::LocInfo LocInfo,
271                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
272 
273   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
274     if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
275       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
276       return false;
277     }
278   }
279 
280   unsigned Offset = State.AllocateStack(4, 4);
281   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
282   return false;
283 }
284 
285 static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
286                             MVT LocVT, CCValAssign::LocInfo LocInfo,
287                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
288   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
289     if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
290       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
291       return false;
292     }
293   }
294 
295   unsigned Offset = State.AllocateStack(8, 8);
296   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
297   return false;
298 }
299 
300 SDValue
301 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
302 const {
303   return SDValue();
304 }
305 
306 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
307 /// by "Src" to address "Dst" of size "Size".  Alignment information is
308 /// specified by the specific parameter attribute. The copy will be passed as
309 /// a byval function parameter.  Sometimes what we are copying is the end of a
310 /// larger object, the part that does not fit in registers.
311 static SDValue
312 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
313                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
314                           SDLoc dl) {
315 
316   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
317   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
318                        /*isVolatile=*/false, /*AlwaysInline=*/false,
319                        /*isTailCall=*/false,
320                        MachinePointerInfo(), MachinePointerInfo());
321 }
322 
323 
324 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
325 // passed by value, the function prototype is modified to return void and
326 // the value is stored in memory pointed to by a pointer passed by the caller.
327 SDValue
328 HexagonTargetLowering::LowerReturn(SDValue Chain,
329                                    CallingConv::ID CallConv, bool isVarArg,
330                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
331                                    const SmallVectorImpl<SDValue> &OutVals,
332                                    SDLoc dl, SelectionDAG &DAG) const {
333 
334   // CCValAssign - represents the assignment of the return value to locations.
335   SmallVector<CCValAssign, 16> RVLocs;
336 
337   // CCState - Info about the registers and stack slot.
338   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
339                  *DAG.getContext());
340 
341   // Analyze return values of ISD::RET
342   CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
343 
344   SDValue Flag;
345   SmallVector<SDValue, 4> RetOps(1, Chain);
346 
347   // Copy the result values into the output registers.
348   for (unsigned i = 0; i != RVLocs.size(); ++i) {
349     CCValAssign &VA = RVLocs[i];
350 
351     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
352 
353     // Guarantee that all emitted copies are stuck together with flags.
354     Flag = Chain.getValue(1);
355     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
356   }
357 
358   RetOps[0] = Chain;  // Update chain.
359 
360   // Add the flag if we have it.
361   if (Flag.getNode())
362     RetOps.push_back(Flag);
363 
364   return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
365 }
366 
367 
368 
369 
370 /// LowerCallResult - Lower the result values of an ISD::CALL into the
371 /// appropriate copies out of appropriate physical registers.  This assumes that
372 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
373 /// being lowered. Returns an SDNode with the same number of values as the
374 /// ISD::CALL.
375 SDValue
376 HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
377                                        CallingConv::ID CallConv, bool isVarArg,
378                                        const
379                                        SmallVectorImpl<ISD::InputArg> &Ins,
380                                        SDLoc dl, SelectionDAG &DAG,
381                                        SmallVectorImpl<SDValue> &InVals,
382                                        const SmallVectorImpl<SDValue> &OutVals,
383                                        SDValue Callee) const {
384 
385   // Assign locations to each value returned by this call.
386   SmallVector<CCValAssign, 16> RVLocs;
387 
388   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
389                  *DAG.getContext());
390 
391   CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
392 
393   // Copy all of the result registers out of their specified physreg.
394   for (unsigned i = 0; i != RVLocs.size(); ++i) {
395     Chain = DAG.getCopyFromReg(Chain, dl,
396                                RVLocs[i].getLocReg(),
397                                RVLocs[i].getValVT(), InFlag).getValue(1);
398     InFlag = Chain.getValue(2);
399     InVals.push_back(Chain.getValue(0));
400   }
401 
402   return Chain;
403 }
404 
405 /// LowerCall - Function arguments are copied from virtual registers to
406 /// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
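/// In outline, the sequence built here is: CALLSEQ_START, copy-to-reg nodes
/// for register arguments (plus stores for any stack arguments), the call
/// node itself (CALLv3/CALLv3nr, or TC_RETURN for tail calls), CALLSEQ_END,
/// and finally LowerCallResult to copy results out of physical registers.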
407 SDValue
408 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
409                                  SmallVectorImpl<SDValue> &InVals) const {
410   SelectionDAG &DAG                     = CLI.DAG;
411   SDLoc &dl                             = CLI.DL;
412   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
413   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
414   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
415   SDValue Chain                         = CLI.Chain;
416   SDValue Callee                        = CLI.Callee;
417   bool &isTailCall                      = CLI.IsTailCall;
418   CallingConv::ID CallConv              = CLI.CallConv;
419   bool isVarArg                         = CLI.IsVarArg;
420   bool doesNotReturn                    = CLI.DoesNotReturn;
421 
422   bool IsStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
423 
424   // Check for varargs.
425   int NumNamedVarArgParams = -1;
426   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
427   {
428     const Function* CalleeFn = nullptr;
429     Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
430     if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
431     {
432       // A variadic function must have at least one named parameter, so a
433       // callee with zero parameters must be an undeclared function. Do not
434       // assume varargs if the callee is undefined.
435       if (CalleeFn->isVarArg() &&
436           CalleeFn->getFunctionType()->getNumParams() != 0) {
437         NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
438       }
439     }
440   }
441 
442   // Analyze operands of the call, assigning locations to each operand.
443   SmallVector<CCValAssign, 16> ArgLocs;
444   HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
445                         *DAG.getContext(), NumNamedVarArgParams);
446 
447   if (NumNamedVarArgParams > 0)
448     CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
449   else
450     CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
451 
452 
453   if (isTailCall) {
454     bool StructAttrFlag =
455       DAG.getMachineFunction().getFunction()->hasStructRetAttr();
456     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
457                                                    isVarArg, IsStructRet,
458                                                    StructAttrFlag,
459                                                    Outs, OutVals, Ins, DAG);
460     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
461       CCValAssign &VA = ArgLocs[i];
462       if (VA.isMemLoc()) {
463         isTailCall = false;
464         break;
465       }
466     }
467     if (isTailCall) {
468       DEBUG(dbgs() << "Eligible for Tail Call\n");
469     } else {
470       DEBUG(dbgs() <<
471             "Argument must be passed on stack. Not eligible for Tail Call\n");
472     }
473   }
474   // Get a count of how many bytes are to be pushed on the stack.
475   unsigned NumBytes = CCInfo.getNextStackOffset();
476   SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
477   SmallVector<SDValue, 8> MemOpChains;
478 
479   const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
480   SDValue StackPtr =
481       DAG.getCopyFromReg(Chain, dl, QRI->getStackRegister(), getPointerTy());
482 
483   // Walk the register/memloc assignments, inserting copies/loads.
484   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
485     CCValAssign &VA = ArgLocs[i];
486     SDValue Arg = OutVals[i];
487     ISD::ArgFlagsTy Flags = Outs[i].Flags;
488 
489     // Promote the value if needed.
490     switch (VA.getLocInfo()) {
491       default:
492         // Loc info must be one of Full, SExt, ZExt, or AExt.
493         llvm_unreachable("Unknown loc info!");
494       case CCValAssign::Full:
495         break;
496       case CCValAssign::SExt:
497         Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
498         break;
499       case CCValAssign::ZExt:
500         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
501         break;
502       case CCValAssign::AExt:
503         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
504         break;
505     }
506 
507     if (VA.isMemLoc()) {
508       unsigned LocMemOffset = VA.getLocMemOffset();
509       SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
510       PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
511 
512       if (Flags.isByVal()) {
513         // The argument is a struct passed by value. According to LLVM, "Arg"
514         // is a pointer.
515         MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
516                                                         Flags, DAG, dl));
517       } else {
518         // The argument is not passed by value. "Arg" is a builtin type. It is
519         // not a pointer.
520         MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
521                                            MachinePointerInfo(),false, false,
522                                            0));
523       }
524       continue;
525     }
526 
527     // Arguments that can be passed in a register must be kept in the
528     // RegsToPass vector.
529     if (VA.isRegLoc()) {
530       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
531     }
532   }
533 
534   // Transform all store nodes into one single node because all store
535   // nodes are independent of each other.
536   if (!MemOpChains.empty()) {
537     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
538   }
539 
540   if (!isTailCall)
541     Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
542                                                         getPointerTy(), true),
543                                  dl);
544 
545   // Build a sequence of copy-to-reg nodes chained together with token
546   // chain and flag operands which copy the outgoing args into registers.
547   // The InFlag is necessary since all emitted instructions must be
548   // stuck together.
549   SDValue InFlag;
550   if (!isTailCall) {
551     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
552       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
553                                RegsToPass[i].second, InFlag);
554       InFlag = Chain.getValue(1);
555     }
556   }
557 
558   // For tail calls lower the arguments to the 'real' stack slot.
559   if (isTailCall) {
560     // Force all the incoming stack arguments to be loaded from the stack
561     // before any new outgoing arguments are stored to the stack, because the
562     // outgoing stack slots may alias the incoming argument stack slots, and
563     // the alias isn't otherwise explicit. This is slightly more conservative
564     // than necessary, because it means that each store effectively depends
565     // on every argument instead of just those arguments it would clobber.
566     //
567     // Do not flag preceding copytoreg stuff together with the following stuff.
568     InFlag = SDValue();
569     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
570       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
571                                RegsToPass[i].second, InFlag);
572       InFlag = Chain.getValue(1);
573     }
574     InFlag = SDValue();
575   }
576 
577   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
578   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
579   // node so that legalize doesn't hack it.
580   if (flag_aligned_memcpy) {
581     const char *MemcpyName =
582       "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
583     Callee =
584       DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
585     flag_aligned_memcpy = false;
586   } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
587     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
588   } else if (ExternalSymbolSDNode *S =
589              dyn_cast<ExternalSymbolSDNode>(Callee)) {
590     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
591   }
592 
593   // Returns a chain & a flag for retval copy to use.
594   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
595   SmallVector<SDValue, 8> Ops;
596   Ops.push_back(Chain);
597   Ops.push_back(Callee);
598 
599   // Add argument registers to the end of the list so that they are
600   // known live into the call.
601   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
602     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
603                                   RegsToPass[i].second.getValueType()));
604   }
605 
606   if (InFlag.getNode()) {
607     Ops.push_back(InFlag);
608   }
609 
610   if (isTailCall)
611     return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
612 
613   int OpCode = doesNotReturn ? HexagonISD::CALLv3nr : HexagonISD::CALLv3;
614   Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
615   InFlag = Chain.getValue(1);
616 
617   // Create the CALLSEQ_END node.
618   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
619                              DAG.getIntPtrConstant(0, true), InFlag, dl);
620   InFlag = Chain.getValue(1);
621 
622   // Handle result values, copying them out of physregs into vregs that we
623   // return.
624   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
625                          InVals, OutVals, Callee);
626 }
627 
628 static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
629                                    bool isSEXTLoad, SDValue &Base,
630                                    SDValue &Offset, bool &isInc,
631                                    SelectionDAG &DAG) {
632   if (Ptr->getOpcode() != ISD::ADD)
633     return false;
634 
635   if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
636     isInc = (Ptr->getOpcode() == ISD::ADD);
637     Base = Ptr->getOperand(0);
638     Offset = Ptr->getOperand(1);
639     // Ensure that Offset is a constant.
640     return (isa<ConstantSDNode>(Offset));
641   }
642 
643   return false;
644 }
645 
646 // TODO: Put this function along with the other isS* functions in
647 // HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
648 // functions defined in HexagonOperands.td.
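// A brief sketch of the check below: the constant offset, shifted right by
// ShiftAmount, must land in the signed 4-bit range [-8, 7] to be usable as a
// scaled post-increment immediate.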
649 static bool Is_PostInc_S4_Offset(SDNode *S, int ShiftAmount) {
650   ConstantSDNode *N = cast<ConstantSDNode>(S);
651 
652   // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
653   // field.
654   int64_t v = (int64_t)N->getSExtValue();
655   int64_t m = 0;
656   if (ShiftAmount > 0) {
657     m = v % ShiftAmount;
658     v = v >> ShiftAmount;
659   }
660   return (v <= 7) && (v >= -8) && (m == 0);
661 }
662 
663 /// getPostIndexedAddressParts - returns true by value, base pointer and
664 /// offset pointer and addressing mode by reference if this node can be
665 /// combined with a load / store to form a post-indexed load / store.
666 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
667                                                        SDValue &Base,
668                                                        SDValue &Offset,
669                                                        ISD::MemIndexedMode &AM,
670                                                        SelectionDAG &DAG) const
671 {
672   EVT VT;
673   SDValue Ptr;
674   bool isSEXTLoad = false;
675 
676   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
677     VT  = LD->getMemoryVT();
678     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
679   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
680     VT  = ST->getMemoryVT();
681     if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
682       return false;
683     }
684   } else {
685     return false;
686   }
687 
688   bool isInc = false;
689   bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
690                                         isInc, DAG);
691   // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
692   int ShiftAmount = VT.getSizeInBits() / 16;
693   if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
694     AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
695     return true;
696   }
697 
698   return false;
699 }
700 
701 SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
702                                               SelectionDAG &DAG) const {
703   SDNode *Node = Op.getNode();
704   MachineFunction &MF = DAG.getMachineFunction();
705   HexagonMachineFunctionInfo *FuncInfo =
706     MF.getInfo<HexagonMachineFunctionInfo>();
707   switch (Node->getOpcode()) {
708     case ISD::INLINEASM: {
709       unsigned NumOps = Node->getNumOperands();
710       if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
711         --NumOps;  // Ignore the flag operand.
712 
713       for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
714         if (FuncInfo->hasClobberLR())
715           break;
716         unsigned Flags =
717           cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
718         unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
719         ++i;  // Skip the ID value.
720 
721         switch (InlineAsm::getKind(Flags)) {
722         default: llvm_unreachable("Bad flags!");
723           case InlineAsm::Kind_RegDef:
724           case InlineAsm::Kind_RegUse:
725           case InlineAsm::Kind_Imm:
726           case InlineAsm::Kind_Clobber:
727           case InlineAsm::Kind_Mem: {
728             for (; NumVals; --NumVals, ++i) {}
729             break;
730           }
731           case InlineAsm::Kind_RegDefEarlyClobber: {
732             for (; NumVals; --NumVals, ++i) {
733               unsigned Reg =
734                 cast<RegisterSDNode>(Node->getOperand(i))->getReg();
735 
736               // Check whether it is the link register (LR).
737               const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
738               if (Reg == QRI->getRARegister()) {
739                 FuncInfo->setHasClobberLR(true);
740                 break;
741               }
742             }
743             break;
744           }
745         }
746       }
747     }
748   } // Node->getOpcode
749   return Op;
750 }
751 
752 
753 //
754 // Taken from the XCore backend.
755 //
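// In outline: the jump-table base (HexagonISD::JT) is added to the index
// scaled by 4, the target address is loaded from that slot, and the result
// feeds a HexagonISD::BR_JT node. Every target block is also marked as
// address-taken (see the loop below).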
756 SDValue HexagonTargetLowering::
757 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
758 {
759   SDValue Chain = Op.getOperand(0);
760   SDValue Table = Op.getOperand(1);
761   SDValue Index = Op.getOperand(2);
762   SDLoc dl(Op);
763   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
764   unsigned JTI = JT->getIndex();
765   MachineFunction &MF = DAG.getMachineFunction();
766   const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
767   SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
768 
769   // Mark all jump table targets as address taken.
770   const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
771   const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
772   for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
773     MachineBasicBlock *MBB = JTBBs[i];
774     MBB->setHasAddressTaken();
775     // This line is needed to set the hasAddressTaken flag on the BasicBlock
776     // object.
777     BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
778   }
779 
780   SDValue JumpTableBase = DAG.getNode(HexagonISD::JT, dl,
781                                       getPointerTy(), TargetJT);
782   SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
783                                    DAG.getConstant(2, MVT::i32));
784   SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
785                                   ShiftIndex);
786   SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
787                                    MachinePointerInfo(), false, false, false,
788                                    0);
789   return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
790 }
791 
792 
793 SDValue
794 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
795                                                SelectionDAG &DAG) const {
796   SDValue Chain = Op.getOperand(0);
797   SDValue Size = Op.getOperand(1);
798   SDLoc dl(Op);
799 
800   unsigned SPReg = getStackPointerRegisterToSaveRestore();
801 
802   // Get a reference to the stack pointer.
803   SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
804 
805   // Subtract the dynamic size from the actual stack size to
806   // obtain the new stack size.
807   SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
808 
809   //
810   // For Hexagon, the outgoing memory arguments area should be on top of the
811   // alloca area on the stack i.e., the outgoing memory arguments should be
812   // at a lower address than the alloca area. Move the alloca area down the
813   // stack by adding back the space reserved for outgoing arguments to SP
814   // here.
815   //
816   // We do not know what the size of the outgoing args is at this point.
817   // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
818   // stack pointer. We patch this instruction with the correct, known
819   // offset in emitPrologue().
820   //
821   // Use a placeholder immediate (zero) for now. This will be patched up
822   // by emitPrologue().
823   SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
824                                   MVT::i32,
825                                   Sub,
826                                   DAG.getConstant(0, MVT::i32));
827 
828   // The Sub result contains the new stack start address, so it
829   // must be placed in the stack pointer register.
830   const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
831   SDValue CopyChain = DAG.getCopyToReg(Chain, dl, QRI->getStackRegister(), Sub);
832 
833   SDValue Ops[2] = { ArgAdjust, CopyChain };
834   return DAG.getMergeValues(Ops, dl);
835 }
836 
837 SDValue
838 HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
839                                             CallingConv::ID CallConv,
840                                             bool isVarArg,
841                                             const
842                                             SmallVectorImpl<ISD::InputArg> &Ins,
843                                             SDLoc dl, SelectionDAG &DAG,
844                                             SmallVectorImpl<SDValue> &InVals)
845 const {
846 
847   MachineFunction &MF = DAG.getMachineFunction();
848   MachineFrameInfo *MFI = MF.getFrameInfo();
849   MachineRegisterInfo &RegInfo = MF.getRegInfo();
850   HexagonMachineFunctionInfo *FuncInfo =
851     MF.getInfo<HexagonMachineFunctionInfo>();
852 
853 
854   // Assign locations to all of the incoming arguments.
855   SmallVector<CCValAssign, 16> ArgLocs;
856   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
857                  *DAG.getContext());
858 
859   CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
860 
861   // For LLVM, in the case when returning a struct by value (> 8 bytes),
862   // the first argument is a pointer that points to the location on the
863   // caller's stack where the return value will be stored. For Hexagon, the
864   // location on the caller's stack is passed only when the struct size is
865   // less than or equal to 8 bytes. If not, no address is passed into the
866   // callee and the callee returns the result directly through R0/R1.
867 
868   SmallVector<SDValue, 4> MemOps;
869 
870   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
871     CCValAssign &VA = ArgLocs[i];
872     ISD::ArgFlagsTy Flags = Ins[i].Flags;
873     unsigned ObjSize;
874     unsigned StackLocation;
875     int FI;
876 
877     if (   (VA.isRegLoc() && !Flags.isByVal())
878         || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
879       // Arguments passed in registers
880       // 1. int, long long, ptr args that get allocated in registers.
881       // 2. Large structs that get a register to put their address in.
882       EVT RegVT = VA.getLocVT();
883       if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
884           RegVT == MVT::i32 || RegVT == MVT::f32) {
885         unsigned VReg =
886           RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
887         RegInfo.addLiveIn(VA.getLocReg(), VReg);
888         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
889       } else if (RegVT == MVT::i64 || RegVT == MVT::f64) {
890         unsigned VReg =
891           RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
892         RegInfo.addLiveIn(VA.getLocReg(), VReg);
893         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
894       } else {
895         llvm_unreachable("Unexpected register type for formal argument");
896       }
897     } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
898       llvm_unreachable("ByValSize must be bigger than 8 bytes");
899     } else {
900       // Sanity check.
901       assert(VA.isMemLoc());
902 
903       if (Flags.isByVal()) {
904         // If it's a byval parameter, then we need to compute the
905         // "real" size, not the size of the pointer.
906         ObjSize = Flags.getByValSize();
907       } else {
908         ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
909       }
910 
911       StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
912       // Create the frame index object for this incoming parameter...
913       FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
914 
915       // Create the SelectionDAG nodes corresponding to a load
916       // from this parameter.
917       SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
918 
919       if (Flags.isByVal()) {
920         // If it's a pass-by-value aggregate, then do not dereference the stack
921         // location. Instead, we should generate a reference to the stack
922         // location.
923         InVals.push_back(FIN);
924       } else {
925         InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
926                                      MachinePointerInfo(), false, false,
927                                      false, 0));
928       }
929     }
930   }
931 
932   if (!MemOps.empty())
933     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
934 
935   if (isVarArg) {
936     // This will point to the next argument passed via stack.
937     int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
938                                             HEXAGON_LRFP_SIZE +
939                                             CCInfo.getNextStackOffset(),
940                                             true);
941     FuncInfo->setVarArgsFrameIndex(FrameIndex);
942   }
943 
944   return Chain;
945 }
946 
947 SDValue
948 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
949   // VASTART stores the address of the VarArgsFrameIndex slot into the
950   // memory location argument.
951   MachineFunction &MF = DAG.getMachineFunction();
952   HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
953   SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
954   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
955   return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
956                       Op.getOperand(1), MachinePointerInfo(SV), false,
957                       false, 0);
958 }
959 
960 // Creates a SPLAT instruction for a constant value VAL.
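// For example, splatting into v4i8 is expected to replicate the low byte of
// Val into all four lanes via VSPLATB, and splatting into v4i16 replicates
// the low half-word via VSPLATH; any other type falls back to the default
// lowering by returning an empty SDValue.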
961 static SDValue createSplat(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue Val) {
962   if (VT.getSimpleVT() == MVT::v4i8)
963     return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);
964 
965   if (VT.getSimpleVT() == MVT::v4i16)
966     return DAG.getNode(HexagonISD::VSPLATH, dl, VT, Val);
967 
968   return SDValue();
969 }
970 
971 static bool isSExtFree(SDValue N) {
972   // A sign-extend of a truncate of a sign-extend is free.
973   if (N.getOpcode() == ISD::TRUNCATE &&
974       N.getOperand(0).getOpcode() == ISD::AssertSext)
975     return true;
976   // We have sign-extended loads.
977   if (N.getOpcode() == ISD::LOAD)
978     return true;
979   return false;
980 }
981 
982 SDValue HexagonTargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
983   SDLoc dl(Op);
984   SDValue InpVal = Op.getOperand(0);
985   if (isa<ConstantSDNode>(InpVal)) {
986     uint64_t V = cast<ConstantSDNode>(InpVal)->getZExtValue();
987     return DAG.getTargetConstant(countPopulation(V), MVT::i64);
988   }
989   SDValue PopOut = DAG.getNode(HexagonISD::POPCOUNT, dl, MVT::i32, InpVal);
990   return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, PopOut);
991 }
992 
993 SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
994   SDLoc dl(Op);
995 
996   SDValue LHS = Op.getOperand(0);
997   SDValue RHS = Op.getOperand(1);
998   SDValue Cmp = Op.getOperand(2);
999   ISD::CondCode CC = cast<CondCodeSDNode>(Cmp)->get();
1000 
1001   EVT VT = Op.getValueType();
1002   EVT LHSVT = LHS.getValueType();
1003   EVT RHSVT = RHS.getValueType();
1004 
1005   if (LHSVT == MVT::v2i16) {
1006     assert(ISD::isSignedIntSetCC(CC) || ISD::isUnsignedIntSetCC(CC));
1007     unsigned ExtOpc = ISD::isSignedIntSetCC(CC) ? ISD::SIGN_EXTEND
1008                                                 : ISD::ZERO_EXTEND;
1009     SDValue LX = DAG.getNode(ExtOpc, dl, MVT::v2i32, LHS);
1010     SDValue RX = DAG.getNode(ExtOpc, dl, MVT::v2i32, RHS);
1011     SDValue SC = DAG.getNode(ISD::SETCC, dl, MVT::v2i1, LX, RX, Cmp);
1012     return SC;
1013   }
1014 
1015   // Treat all other vector types as legal.
1016   if (VT.isVector())
1017     return Op;
1018 
1019   // Equals and not equals should use sign-extend, not zero-extend, since
1020   // we can represent small negative values in the compare instructions.
1021   // The LLVM default is to use zero-extend arbitrarily in these cases.
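  // For instance, (seteq i8 %x, -1) is handled below by sign-extending both
  // operands to i32 before the compare, instead of zero-extending %x and
  // comparing it against 255.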
1022   if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
1023       (RHSVT == MVT::i8 || RHSVT == MVT::i16) &&
1024       (LHSVT == MVT::i8 || LHSVT == MVT::i16)) {
1025     ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
1026     if (C && C->getAPIntValue().isNegative()) {
1027       LHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, LHS);
1028       RHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, RHS);
1029       return DAG.getNode(ISD::SETCC, dl, Op.getValueType(),
1030                          LHS, RHS, Op.getOperand(2));
1031     }
1032     if (isSExtFree(LHS) || isSExtFree(RHS)) {
1033       LHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, LHS);
1034       RHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, RHS);
1035       return DAG.getNode(ISD::SETCC, dl, Op.getValueType(),
1036                          LHS, RHS, Op.getOperand(2));
1037     }
1038   }
1039   return SDValue();
1040 }
1041 
1042 SDValue HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG)
1043       const {
1044   SDValue PredOp = Op.getOperand(0);
1045   SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
1046   EVT OpVT = Op1.getValueType();
1047   SDLoc DL(Op);
1048 
1049   if (OpVT == MVT::v2i16) {
1050     SDValue X1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op1);
1051     SDValue X2 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op2);
1052     SDValue SL = DAG.getNode(ISD::VSELECT, DL, MVT::v2i32, PredOp, X1, X2);
1053     SDValue TR = DAG.getNode(ISD::TRUNCATE, DL, MVT::v2i16, SL);
1054     return TR;
1055   }
1056 
1057   return SDValue();
1058 }
1059 
1060 // Handle only specific vector loads.
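// Currently only v4i16 is custom-lowered here: a load with alignment 2 is
// split into four half-word loads that are shifted and OR-ed back together,
// while a sufficiently aligned load is simply done as an i64 load and
// bitcast back to v4i16.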
1061 SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1062   EVT VT = Op.getValueType();
1063   SDLoc DL(Op);
1064   LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
1065   SDValue Chain = LoadNode->getChain();
1066   SDValue Ptr = Op.getOperand(1);
1067   SDValue LoweredLoad;
1068   SDValue Result;
1069   SDValue Base = LoadNode->getBasePtr();
1070   ISD::LoadExtType Ext = LoadNode->getExtensionType();
1071   unsigned Alignment = LoadNode->getAlignment();
1072   SDValue LoadChain;
1073 
1074   if (Ext == ISD::NON_EXTLOAD)
1075     Ext = ISD::ZEXTLOAD;
1076 
1077   if (VT == MVT::v4i16) {
1078     if (Alignment == 2) {
1079       SDValue Loads[4];
1080       // Base load.
1081       Loads[0] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Base,
1082                                 LoadNode->getPointerInfo(), MVT::i16,
1083                                 LoadNode->isVolatile(),
1084                                 LoadNode->isNonTemporal(),
1085                                 LoadNode->isInvariant(),
1086                                 Alignment);
1087       // Base+2 load.
1088       SDValue Increment = DAG.getConstant(2, MVT::i32);
1089       Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
1090       Loads[1] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
1091                                 LoadNode->getPointerInfo(), MVT::i16,
1092                                 LoadNode->isVolatile(),
1093                                 LoadNode->isNonTemporal(),
1094                                 LoadNode->isInvariant(),
1095                                 Alignment);
1096       // SHL 16, then OR base and base+2.
1097       SDValue ShiftAmount = DAG.getConstant(16, MVT::i32);
1098       SDValue Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[1], ShiftAmount);
1099       SDValue Tmp2 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[0]);
1100       // Base + 4.
1101       Increment = DAG.getConstant(4, MVT::i32);
1102       Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
1103       Loads[2] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
1104                                 LoadNode->getPointerInfo(), MVT::i16,
1105                                 LoadNode->isVolatile(),
1106                                 LoadNode->isNonTemporal(),
1107                                 LoadNode->isInvariant(),
1108                                 Alignment);
1109       // Base + 6.
1110       Increment = DAG.getConstant(6, MVT::i32);
1111       Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
1112       Loads[3] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
1113                                 LoadNode->getPointerInfo(), MVT::i16,
1114                                 LoadNode->isVolatile(),
1115                                 LoadNode->isNonTemporal(),
1116                                 LoadNode->isInvariant(),
1117                                 Alignment);
1118       // SHL 16, then OR base+4 and base+6.
1119       Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[3], ShiftAmount);
1120       SDValue Tmp4 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[2]);
1121       // Combine to i64. This could be optimised out later if we can
1122       // affect reg allocation of this code.
1123       Result = DAG.getNode(HexagonISD::COMBINE, DL, MVT::i64, Tmp4, Tmp2);
1124       LoadChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1125                               Loads[0].getValue(1), Loads[1].getValue(1),
1126                               Loads[2].getValue(1), Loads[3].getValue(1));
1127     } else {
1128       // Perform default type expansion.
1129       Result = DAG.getLoad(MVT::i64, DL, Chain, Ptr, LoadNode->getPointerInfo(),
1130                            LoadNode->isVolatile(), LoadNode->isNonTemporal(),
1131                           LoadNode->isInvariant(), LoadNode->getAlignment());
1132       LoadChain = Result.getValue(1);
1133     }
1134   } else
1135     llvm_unreachable("Custom lowering unsupported load");
1136 
1137   Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
1138   // Since we pretend to lower a load, we need the original chain
1139   // info attached to the result.
1140   SDValue Ops[] = { Result, LoadChain };
1141 
1142   return DAG.getMergeValues(Ops, DL);
1143 }
1144 
1145 
1146 SDValue
1147 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
1148   EVT ValTy = Op.getValueType();
1149   SDLoc dl(Op);
1150   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1151   SDValue Res;
1152   if (CP->isMachineConstantPoolEntry())
1153     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
1154                                     CP->getAlignment());
1155   else
1156     Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
1157                                     CP->getAlignment());
1158   return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
1159 }
1160 
1161 SDValue
1162 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
1163   const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
1164   MachineFunction &MF = DAG.getMachineFunction();
1165   MachineFrameInfo *MFI = MF.getFrameInfo();
1166   MFI->setReturnAddressIsTaken(true);
1167 
1168   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1169     return SDValue();
1170 
1171   EVT VT = Op.getValueType();
1172   SDLoc dl(Op);
1173   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1174   if (Depth) {
1175     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
1176     SDValue Offset = DAG.getConstant(4, MVT::i32);
1177     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
1178                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
1179                        MachinePointerInfo(), false, false, false, 0);
1180   }
1181 
1182   // Return LR, which contains the return address. Mark it an implicit live-in.
1183   unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
1184   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
1185 }
1186 
1187 SDValue
1188 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
1189   const HexagonRegisterInfo *TRI = Subtarget->getRegisterInfo();
1190   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
1191   MFI->setFrameAddressIsTaken(true);
1192 
1193   EVT VT = Op.getValueType();
1194   SDLoc dl(Op);
1195   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1196   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1197                                          TRI->getFrameRegister(), VT);
1198   while (Depth--)
1199     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1200                             MachinePointerInfo(),
1201                             false, false, false, 0);
1202   return FrameAddr;
1203 }
1204 
1205 SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
1206                                                  SelectionDAG& DAG) const {
1207   SDLoc dl(Op);
1208   return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1209 }
1210 
1211 
1212 SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
1213                                                   SelectionDAG &DAG) const {
1214   SDValue Result;
1215   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1216   int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
1217   SDLoc dl(Op);
1218   Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
1219 
1220   const HexagonTargetObjectFile *TLOF =
1221       static_cast<const HexagonTargetObjectFile *>(
1222           getTargetMachine().getObjFileLowering());
1223   if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine())) {
1224     return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
1225   }
1226 
1227   return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
1228 }
1229 
1230 // Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
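// For example, the constructor calls promoteLdStType(MVT::v4i8, MVT::i32),
// which makes v4i8 loads and stores legal by promoting them to the matching
// i32 operations; the vector value itself lives in a 32-bit integer
// register.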
1231 void HexagonTargetLowering::promoteLdStType(EVT VT, EVT PromotedLdStVT) {
1232   if (VT != PromotedLdStVT) {
1233     setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
1234     AddPromotedToType(ISD::LOAD, VT.getSimpleVT(),
1235                       PromotedLdStVT.getSimpleVT());
1236 
1237     setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
1238     AddPromotedToType(ISD::STORE, VT.getSimpleVT(),
1239                       PromotedLdStVT.getSimpleVT());
1240   }
1241 }
1242 
1243 SDValue
1244 HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1245   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1246   SDValue BA_SD =  DAG.getTargetBlockAddress(BA, MVT::i32);
1247   SDLoc dl(Op);
1248   return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
1249 }
1250 
1251 //===----------------------------------------------------------------------===//
1252 // TargetLowering Implementation
1253 //===----------------------------------------------------------------------===//
1254 
1255 HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
1256                                              const HexagonSubtarget &STI)
1257     : TargetLowering(TM), Subtarget(&STI) {
1258 
1259   // Set up the register classes.
1260   addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass);  // bbbbaaaa
1261   addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass);  // ddccbbaa
1262   addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass);  // hgfedcba
1263   addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1264   addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
1265   addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
1266   promoteLdStType(MVT::v4i8, MVT::i32);
1267   promoteLdStType(MVT::v2i16, MVT::i32);
1268 
1269   if (Subtarget->hasV5TOps()) {
1270     addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1271     addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1272   }
1273 
1274   addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1275   addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
1276   addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
1277   addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);
1278   promoteLdStType(MVT::v8i8, MVT::i64);
1279 
1280   // Custom-lower only the v4i16 load; let the v4i16 store be
1281   // promoted for now.
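       // (Sketch of the intent: v4i16 loads are handled by LowerLOAD via the
       // Custom hook, while v4i16 stores are simply emitted as i64 stores.)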
1282   setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
1283   AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::i64);
1284   setOperationAction(ISD::STORE, MVT::v4i16, Promote);
1285   AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::i64);
1286   promoteLdStType(MVT::v2i32, MVT::i64);
1287 
1288   for (unsigned i = (unsigned) MVT::FIRST_VECTOR_VALUETYPE;
1289        i <= (unsigned) MVT::LAST_VECTOR_VALUETYPE; ++i) {
1290     MVT::SimpleValueType VT = (MVT::SimpleValueType) i;
1291 
1292     // Hexagon does not have support for the following operations,
1293     // so they need to be expanded.
1294     setOperationAction(ISD::SELECT, VT, Expand);
1295     setOperationAction(ISD::SDIV, VT, Expand);
1296     setOperationAction(ISD::SREM, VT, Expand);
1297     setOperationAction(ISD::UDIV, VT, Expand);
1298     setOperationAction(ISD::UREM, VT, Expand);
1299     setOperationAction(ISD::ROTL, VT, Expand);
1300     setOperationAction(ISD::ROTR, VT, Expand);
1301     setOperationAction(ISD::FDIV, VT, Expand);
1302     setOperationAction(ISD::FNEG, VT, Expand);
1303     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
1304     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
1305     setOperationAction(ISD::UDIVREM, VT, Expand);
1306     setOperationAction(ISD::SDIVREM, VT, Expand);
1307     setOperationAction(ISD::FPOW, VT, Expand);
1308     setOperationAction(ISD::CTPOP, VT, Expand);
1309     setOperationAction(ISD::CTLZ, VT, Expand);
1310     setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
1311     setOperationAction(ISD::CTTZ, VT, Expand);
1312     setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
1313 
1314     // Expand all any-extend loads.
1315     for (unsigned j = (unsigned) MVT::FIRST_VECTOR_VALUETYPE;
1316                   j <= (unsigned) MVT::LAST_VECTOR_VALUETYPE; ++j)
1317       setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType) j, VT, Expand);
1318 
1319     // Expand all trunc stores.
1320     for (unsigned TargetVT = (unsigned) MVT::FIRST_VECTOR_VALUETYPE;
1321          TargetVT <= (unsigned) MVT::LAST_VECTOR_VALUETYPE; ++TargetVT)
1322       setTruncStoreAction(VT, (MVT::SimpleValueType) TargetVT, Expand);
1323 
1324     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
1325     setOperationAction(ISD::ConstantPool, VT, Expand);
1326     setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
1327     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
1328     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
1329     setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
1330     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
1331     setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
1332     setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
1333     setOperationAction(ISD::SRA, VT, Custom);
1334     setOperationAction(ISD::SHL, VT, Custom);
1335     setOperationAction(ISD::SRL, VT, Custom);
1336 
1337     if (!isTypeLegal(VT))
1338       continue;
1339 
1340     setOperationAction(ISD::ADD, VT, Legal);
1341     setOperationAction(ISD::SUB, VT, Legal);
1342     setOperationAction(ISD::MUL, VT, Legal);
1343 
1344     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1345     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1346     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1347     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1348     setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1349     setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1350   }
1351 
1352   setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1353   setOperationAction(ISD::VSELECT, MVT::v2i16, Custom);
1354   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
1355   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
1356 
1357   setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1358 
1359   addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1360 
1361   computeRegisterProperties(Subtarget->getRegisterInfo());
1362 
1363   // Align loop entry
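       // (setPrefLoopAlignment takes a log2 value here, so 4 requests 16-byte
       // alignment; this is an assumption about this TargetLowering revision.)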
1364   setPrefLoopAlignment(4);
1365 
1366   // Limits for inline expansion of memcpy/memmove
1367   MaxStoresPerMemcpy = 6;
1368   MaxStoresPerMemmove = 6;
1369 
1370   //
1371   // Library calls for unsupported operations
1372   //
1373 
1374   setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1375   setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1376 
1377   setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1378   setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1379 
1380   setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1381   setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1382 
1383   setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1384   setOperationAction(ISD::SDIV, MVT::i32, Expand);
1385   setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
1386   setOperationAction(ISD::SREM, MVT::i32, Expand);
1387 
1388   setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1389   setOperationAction(ISD::SDIV, MVT::i64, Expand);
1390   setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1391   setOperationAction(ISD::SREM, MVT::i64, Expand);
1392 
1393   setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1394   setOperationAction(ISD::UDIV, MVT::i32, Expand);
1395 
1396   setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1397   setOperationAction(ISD::UDIV, MVT::i64, Expand);
1398 
1399   setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1400   setOperationAction(ISD::UREM, MVT::i32, Expand);
1401 
1402   setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1403   setOperationAction(ISD::UREM, MVT::i64, Expand);
1404 
1405   setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1406   setOperationAction(ISD::FDIV, MVT::f32, Expand);
1407 
1408   setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1409   setOperationAction(ISD::FDIV, MVT::f64, Expand);
1410 
1411   setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1412   setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1413   setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1414 
1415   setOperationAction(ISD::FSQRT, MVT::f32, Expand);
1416   setOperationAction(ISD::FSQRT, MVT::f64, Expand);
1417   setOperationAction(ISD::FSIN, MVT::f32, Expand);
1418   setOperationAction(ISD::FSIN, MVT::f64, Expand);
1419 
1420   if (Subtarget->hasV5TOps()) {
1421     // Hexagon V5 Support.
1422     setOperationAction(ISD::FADD, MVT::f32, Legal);
1423     setOperationAction(ISD::FADD, MVT::f64, Expand);
1424     setOperationAction(ISD::FSUB, MVT::f32, Legal);
1425     setOperationAction(ISD::FSUB, MVT::f64, Expand);
1426     setOperationAction(ISD::FMUL, MVT::f64, Expand);
1427     setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
1428     setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
1429     setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
1430     setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
1431     setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
1432 
1433     setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
1434     setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
1435     setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
1436     setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
1437 
1438     setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
1439     setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
1440     setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
1441     setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
1442 
1443     setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
1444     setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
1445     setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
1446     setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
1447 
1448     setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1449     setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1450 
1451     setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1452     setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1453     setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1454     setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1455 
1456     setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1457     setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1458     setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1459     setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1460 
1461     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1462     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1463     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1464     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1465 
1466     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1467     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1468     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1469     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1470 
1471     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1472     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1473     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1474     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1475 
1476     setOperationAction(ISD::FABS, MVT::f32, Legal);
1477     setOperationAction(ISD::FABS, MVT::f64, Expand);
1478 
1479     setOperationAction(ISD::FNEG, MVT::f32, Legal);
1480     setOperationAction(ISD::FNEG, MVT::f64, Expand);
1481   } else {
1482 
1483     // Expand fp<->uint.
1484     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
1485     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
1486 
1487     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
1488     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
1489 
1490     setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
1491     setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
1492 
1493     setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
1494     setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
1495 
1496     setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
1497     setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
1498 
1499     setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
1500     setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
1501 
1502     setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
1503     setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
1504 
1505     setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
1506     setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
1507 
1508     setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
1509     setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
1510 
1511 
1512     setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
1513     setOperationAction(ISD::FADD, MVT::f32, Expand);
1514     setOperationAction(ISD::FADD, MVT::f64, Expand);
1515 
1516     setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1517     setOperationAction(ISD::FSUB, MVT::f32, Expand);
1518     setOperationAction(ISD::FSUB, MVT::f64, Expand);
1519 
1520     setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
1521     setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
1522 
1523     setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
1524     setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
1525 
1526     setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
1527     setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
1528 
1529     setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
1530     setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
1531 
1532     setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
1533     setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
1534 
1535     setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
1536     setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
1537 
1538     setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
1539     setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
1540 
1541     setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
1542     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
1543 
1544     setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
1545     setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
1546 
1547     setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
1548     setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
1549 
1550     setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
1551     setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
1552 
1553     setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
1554     setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
1555 
1556     setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
1557     setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
1558 
1559     setOperationAction(ISD::FMUL, MVT::f64, Expand);
1560 
1561     setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
1562     setOperationAction(ISD::FMUL, MVT::f32, Expand);
1563 
1564     setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
1565     setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
1566 
1567     setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
1568 
1569     setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1570     setOperationAction(ISD::FSUB, MVT::f64, Expand);
1571 
1572     setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1573     setOperationAction(ISD::FSUB, MVT::f32, Expand);
1574 
1575     setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
1576     setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
1577 
1578     setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
1579     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
1580 
1581     setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
1582     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
1583 
1584     setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
1585     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
1586 
1587     setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
1588     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
1589 
1590     setOperationAction(ISD::FABS, MVT::f32, Expand);
1591     setOperationAction(ISD::FABS, MVT::f64, Expand);
1592     setOperationAction(ISD::FNEG, MVT::f32, Expand);
1593     setOperationAction(ISD::FNEG, MVT::f64, Expand);
1594   }
1595 
1596   setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1597   setOperationAction(ISD::SREM, MVT::i32, Expand);
1598 
1599   setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
1600   setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
1601   setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
1602   setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
1603 
1604   setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
1605   setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
1606   setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
1607   setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
1608 
1609   setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1610 
1611   // Turn FP extload into load/fextend.
1612   for (MVT VT : MVT::fp_valuetypes())
1613     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1614 
1615   // No extending loads from i32.
1616   for (MVT VT : MVT::integer_valuetypes()) {
1617     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
1618     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
1619     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
1620   }
1621 
1622   // Turn FP truncstore into trunc + store.
1623   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1624 
1625   // Custom legalize GlobalAddress nodes into CONST32.
1626   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1627   setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1628   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1629   // Truncate action?
1630   setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
1631 
1632   // Hexagon doesn't have sext_inreg; replace it with shl/sra.
1633   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1634 
1635   // Hexagon has no REM or DIVREM operations.
1636   setOperationAction(ISD::UREM, MVT::i32, Expand);
1637   setOperationAction(ISD::SREM, MVT::i32, Expand);
1638   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1639   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1640   setOperationAction(ISD::SREM, MVT::i64, Expand);
1641   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1642   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1643 
1644   setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1645 
1646   // Lower SELECT_CC to SETCC and SELECT.
1647   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
1648   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
1649   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
1650 
1651   if (Subtarget->hasV5TOps()) {
1652 
1653     // Mark SELECT as Custom for these types so that legalization does not
1654     // fall into the infinite loop of
1655     // select -> setcc -> select_cc -> select.
1656     setOperationAction(ISD::SELECT, MVT::f32, Custom);
1657     setOperationAction(ISD::SELECT, MVT::f64, Custom);
1658 
1659     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
1660     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
1661 
1662   } else {
1663 
1664     // Without V5 there is no floating-point select; expand it.
1665     setOperationAction(ISD::SELECT, MVT::f32, Expand);
1666     setOperationAction(ISD::SELECT, MVT::f64, Expand);
1667   }
1668 
1669   // Custom-lower i8/i16 SETCC to optimize comparisons with negative constants.
1670   setOperationAction(ISD::SETCC, MVT::i16, Custom);
1671   setOperationAction(ISD::SETCC, MVT::i8, Custom);
1672 
1673   if (EmitJumpTables) {
1674     setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1675   } else {
1676     setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1677   }
1678   // Increase the jump table cutover to 5; the previous value was 4.
1679   setMinimumJumpTableEntries(5);
1680 
1681   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
1682   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
1683   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1684   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
1685   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
1686 
1687   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1688 
1689   setOperationAction(ISD::FSIN, MVT::f64, Expand);
1690   setOperationAction(ISD::FCOS, MVT::f64, Expand);
1691   setOperationAction(ISD::FREM, MVT::f64, Expand);
1692   setOperationAction(ISD::FSIN, MVT::f32, Expand);
1693   setOperationAction(ISD::FCOS, MVT::f32, Expand);
1694   setOperationAction(ISD::FREM, MVT::f32, Expand);
1695   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1696   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1697 
1698   // In V4, we have double word add/sub with carry. The problem with
1699   // modelling this instruction is that it produces 2 results - Rdd and Px.
1700   // To model update of Px, we will have to use Defs[p0..p3] which will
1701   // cause any predicate live range to spill. So, we pretend we don't
1702   // have these instructions.
1703   setOperationAction(ISD::ADDE, MVT::i8, Expand);
1704   setOperationAction(ISD::ADDE, MVT::i16, Expand);
1705   setOperationAction(ISD::ADDE, MVT::i32, Expand);
1706   setOperationAction(ISD::ADDE, MVT::i64, Expand);
1707   setOperationAction(ISD::SUBE, MVT::i8, Expand);
1708   setOperationAction(ISD::SUBE, MVT::i16, Expand);
1709   setOperationAction(ISD::SUBE, MVT::i32, Expand);
1710   setOperationAction(ISD::SUBE, MVT::i64, Expand);
1711   setOperationAction(ISD::ADDC, MVT::i8, Expand);
1712   setOperationAction(ISD::ADDC, MVT::i16, Expand);
1713   setOperationAction(ISD::ADDC, MVT::i32, Expand);
1714   setOperationAction(ISD::ADDC, MVT::i64, Expand);
1715   setOperationAction(ISD::SUBC, MVT::i8, Expand);
1716   setOperationAction(ISD::SUBC, MVT::i16, Expand);
1717   setOperationAction(ISD::SUBC, MVT::i32, Expand);
1718   setOperationAction(ISD::SUBC, MVT::i64, Expand);
1719 
1720   // The only add/sub operations that detect overflow are the saturating ones.
1721   for (MVT VT : MVT::integer_valuetypes()) {
1722     setOperationAction(ISD::UADDO, VT, Expand);
1723     setOperationAction(ISD::SADDO, VT, Expand);
1724     setOperationAction(ISD::USUBO, VT, Expand);
1725     setOperationAction(ISD::SSUBO, VT, Expand);
1726   }
1727 
1728   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1729   setOperationAction(ISD::CTPOP, MVT::i64, Expand);
1730   setOperationAction(ISD::CTTZ, MVT::i32, Expand);
1731   setOperationAction(ISD::CTTZ, MVT::i64, Expand);
1732   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
1733   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
1734   setOperationAction(ISD::CTLZ, MVT::i32, Expand);
1735   setOperationAction(ISD::CTLZ, MVT::i64, Expand);
1736   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
1737   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
1738 
1739   setOperationAction(ISD::ROTL, MVT::i32, Expand);
1740   setOperationAction(ISD::ROTR, MVT::i32, Expand);
1741   setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1742   setOperationAction(ISD::ROTL, MVT::i64, Expand);
1743   setOperationAction(ISD::ROTR, MVT::i64, Expand);
1744   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1745   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
1746   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
1747   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
1748 
1749   setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1750   setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1751   setOperationAction(ISD::FPOW, MVT::f64, Expand);
1752   setOperationAction(ISD::FPOW, MVT::f32, Expand);
1753 
1754   setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1755   setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1756   setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1757 
1758   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1759   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1760 
1761   setOperationAction(ISD::MULHS, MVT::i64, Expand);
1762   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1763   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1764 
1765   setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1766 
1767   setExceptionPointerRegister(Hexagon::R0);
1768   setExceptionSelectorRegister(Hexagon::R1);
1769 
1770   // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1771   setOperationAction(ISD::VASTART, MVT::Other, Custom);
1772 
1773   // Use the default implementation.
1774   setOperationAction(ISD::VAARG, MVT::Other, Expand);
1775   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
1776   setOperationAction(ISD::VAEND, MVT::Other, Expand);
1777   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1778   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
1779 
1780   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1781   setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
1782 
1783   setMinFunctionAlignment(2);
1784 
1785   // Needed for DYNAMIC_STACKALLOC expansion.
1786   const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
1787   setStackPointerRegisterToSaveRestore(QRI->getStackRegister());
1788   setSchedulingPreference(Sched::VLIW);
1789 }
1790 
1791 const char*
1792 HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1793   switch (Opcode) {
1794   default: return nullptr;
1795   case HexagonISD::CONST32:     return "HexagonISD::CONST32";
1796   case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1797   case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
1798   case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
1799   case HexagonISD::CMPICC:      return "HexagonISD::CMPICC";
1800   case HexagonISD::CMPFCC:      return "HexagonISD::CMPFCC";
1801   case HexagonISD::BRICC:       return "HexagonISD::BRICC";
1802   case HexagonISD::BRFCC:       return "HexagonISD::BRFCC";
1803   case HexagonISD::SELECT_ICC:  return "HexagonISD::SELECT_ICC";
1804   case HexagonISD::SELECT_FCC:  return "HexagonISD::SELECT_FCC";
1805   case HexagonISD::Hi:          return "HexagonISD::Hi";
1806   case HexagonISD::Lo:          return "HexagonISD::Lo";
1807   case HexagonISD::JT: return "HexagonISD::JT";
1808   case HexagonISD::CP: return "HexagonISD::CP";
1809   case HexagonISD::POPCOUNT: return "HexagonISD::POPCOUNT";
1810   case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
1811   case HexagonISD::PACKHL: return "HexagonISD::PACKHL";
1812   case HexagonISD::VSPLATB: return "HexagonISD::VSPLATB";
1813   case HexagonISD::VSPLATH: return "HexagonISD::VSPLATH";
1814   case HexagonISD::SHUFFEB: return "HexagonISD::SHUFFEB";
1815   case HexagonISD::SHUFFEH: return "HexagonISD::SHUFFEH";
1816   case HexagonISD::SHUFFOB: return "HexagonISD::SHUFFOB";
1817   case HexagonISD::SHUFFOH: return "HexagonISD::SHUFFOH";
1818   case HexagonISD::VSXTBH: return "HexagonISD::VSXTBH";
1819   case HexagonISD::VSXTBW: return "HexagonISD::VSXTBW";
1820   case HexagonISD::VSRAW: return "HexagonISD::VSRAW";
1821   case HexagonISD::VSRAH: return "HexagonISD::VSRAH";
1822   case HexagonISD::VSRLW: return "HexagonISD::VSRLW";
1823   case HexagonISD::VSRLH: return "HexagonISD::VSRLH";
1824   case HexagonISD::VSHLW: return "HexagonISD::VSHLW";
1825   case HexagonISD::VSHLH: return "HexagonISD::VSHLH";
1826   case HexagonISD::VCMPBEQ: return "HexagonISD::VCMPBEQ";
1827   case HexagonISD::VCMPBGT: return "HexagonISD::VCMPBGT";
1828   case HexagonISD::VCMPBGTU: return "HexagonISD::VCMPBGTU";
1829   case HexagonISD::VCMPHEQ: return "HexagonISD::VCMPHEQ";
1830   case HexagonISD::VCMPHGT: return "HexagonISD::VCMPHGT";
1831   case HexagonISD::VCMPHGTU: return "HexagonISD::VCMPHGTU";
1832   case HexagonISD::VCMPWEQ: return "HexagonISD::VCMPWEQ";
1833   case HexagonISD::VCMPWGT: return "HexagonISD::VCMPWGT";
1834   case HexagonISD::VCMPWGTU: return "HexagonISD::VCMPWGTU";
1835   case HexagonISD::INSERT_ri: return "HexagonISD::INSERT_ri";
1836   case HexagonISD::INSERT_rd: return "HexagonISD::INSERT_rd";
1837   case HexagonISD::INSERT_riv: return "HexagonISD::INSERT_riv";
1838   case HexagonISD::INSERT_rdv: return "HexagonISD::INSERT_rdv";
1839   case HexagonISD::EXTRACTU_ri: return "HexagonISD::EXTRACTU_ri";
1840   case HexagonISD::EXTRACTU_rd: return "HexagonISD::EXTRACTU_rd";
1841   case HexagonISD::EXTRACTU_riv: return "HexagonISD::EXTRACTU_riv";
1842   case HexagonISD::EXTRACTU_rdv: return "HexagonISD::EXTRACTU_rdv";
1843   case HexagonISD::FTOI:        return "HexagonISD::FTOI";
1844   case HexagonISD::ITOF:        return "HexagonISD::ITOF";
1845   case HexagonISD::CALLv3:      return "HexagonISD::CALLv3";
1846   case HexagonISD::CALLv3nr:    return "HexagonISD::CALLv3nr";
1847   case HexagonISD::CALLR:       return "HexagonISD::CALLR";
1848   case HexagonISD::RET_FLAG:    return "HexagonISD::RET_FLAG";
1849   case HexagonISD::BR_JT:       return "HexagonISD::BR_JT";
1850   case HexagonISD::TC_RETURN:   return "HexagonISD::TC_RETURN";
1851   case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1852   }
1853 }
1854 
1855 bool
1856 HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
1857   EVT MTy1 = EVT::getEVT(Ty1);
1858   EVT MTy2 = EVT::getEVT(Ty2);
1859   if (!MTy1.isSimple() || !MTy2.isSimple()) {
1860     return false;
1861   }
1862   return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
1863 }
1864 
1865 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
1866   if (!VT1.isSimple() || !VT2.isSimple()) {
1867     return false;
1868   }
1869   return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
1870 }
1871 
1872 // shouldExpandBuildVectorWithShuffles - Return true if this target prefers
1873 // to expand the given BUILD_VECTOR with shuffles.
1874 bool
1875 HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
1876                                   unsigned DefinedValues) const {
1877 
1878   // Hexagon vector shuffles operate only on byte or halfword elements.
1879   EVT EltVT = VT.getVectorElementType();
1880   int EltBits = EltVT.getSizeInBits();
1881   if ((EltBits != 8) && (EltBits != 16))
1882     return false;
1883 
1884   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
1885 }
1886 
1887 // LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3).  V1 and
1888 // V2 are the two vectors to select data from, V3 is the permutation.
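     // Only splat-like shuffles are matched below; anything else returns an
     // empty SDValue so the generic expansion handles it (see the FIXME).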
1889 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1890   const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1891   SDValue V1 = Op.getOperand(0);
1892   SDValue V2 = Op.getOperand(1);
1893   SDLoc dl(Op);
1894   EVT VT = Op.getValueType();
1895 
1896   if (V2.getOpcode() == ISD::UNDEF)
1897     V2 = V1;
1898 
1899   if (SVN->isSplat()) {
1900     int Lane = SVN->getSplatIndex();
1901     if (Lane == -1) Lane = 0;
1902 
1903     // Test if V1 is a SCALAR_TO_VECTOR.
1904     if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
1905       return createSplat(DAG, dl, VT, V1.getOperand(0));
1906 
1907     // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
1908     // (and probably will turn into a SCALAR_TO_VECTOR once legalization
1909     // reaches it).
1910     if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
1911         !isa<ConstantSDNode>(V1.getOperand(0))) {
1912       bool IsScalarToVector = true;
1913       for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
1914         if (V1.getOperand(i).getOpcode() != ISD::UNDEF) {
1915           IsScalarToVector = false;
1916           break;
1917         }
1918       if (IsScalarToVector)
1919         return createSplat(DAG, dl, VT, V1.getOperand(0));
1920     }
1921     return createSplat(DAG, dl, VT, DAG.getConstant(Lane, MVT::i32));
1922   }
1923 
1924   // FIXME: We need to support more general vector shuffles.  For now,
1925   // let Expand handle anything that is not a splat.
1926   //
1927   // If the shuffle is not directly supported and it has 4 elements, one
1928   // option (as in the ARM backend) is to use a PerfectShuffle-generated
1929   // table to synthesize it from other shuffles.
1930   return SDValue();
1931 }
1932 
1933 // Return true if the BUILD_VECTOR repeats its first operand in every
1934 // element, i.e. it is a splat of a common element.
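     // e.g. (build_vector x, x, x, x) for any SDValue x, constant or not.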
1935 static bool isCommonSplatElement(BuildVectorSDNode *BVN) {
1936   unsigned NElts = BVN->getNumOperands();
1937   SDValue V0 = BVN->getOperand(0);
1938 
1939   for (unsigned i = 1, e = NElts; i != e; ++i) {
1940     if (BVN->getOperand(i) != V0)
1941       return false;
1942   }
1943   return true;
1944 }
1945 
1946 // LowerVECTOR_SHIFT - Lower a vector shift. Try to convert
1947 // <VT> = SHL/SRA/SRL <VT> by <VT> to Hexagon specific
1948 // <VT> = SHL/SRA/SRL <VT> by <IT/i32>.
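     // Roughly, (shl v4i16 V, (build_vector 3, 3, 3, 3)) becomes the
     // Hexagon-specific node (VSHLH V, 3).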
1949 static SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) {
1950   BuildVectorSDNode *BVN = nullptr;
1951   SDValue V1 = Op.getOperand(0);
1952   SDValue V2 = Op.getOperand(1);
1953   SDValue V3;
1954   SDLoc dl(Op);
1955   EVT VT = Op.getValueType();
1956 
1957   if ((BVN = dyn_cast<BuildVectorSDNode>(V1.getNode())) &&
1958       isCommonSplatElement(BVN))
1959     V3 = V2;
1960   else if ((BVN = dyn_cast<BuildVectorSDNode>(V2.getNode())) &&
1961            isCommonSplatElement(BVN))
1962     V3 = V1;
1963   else
1964     return SDValue();
1965 
1966   SDValue CommonSplat = BVN->getOperand(0);
1967   SDValue Result;
1968 
1969   if (VT.getSimpleVT() == MVT::v4i16) {
1970     switch (Op.getOpcode()) {
1971     case ISD::SRA:
1972       Result = DAG.getNode(HexagonISD::VSRAH, dl, VT, V3, CommonSplat);
1973       break;
1974     case ISD::SHL:
1975       Result = DAG.getNode(HexagonISD::VSHLH, dl, VT, V3, CommonSplat);
1976       break;
1977     case ISD::SRL:
1978       Result = DAG.getNode(HexagonISD::VSRLH, dl, VT, V3, CommonSplat);
1979       break;
1980     default:
1981       return SDValue();
1982     }
1983   } else if (VT.getSimpleVT() == MVT::v2i32) {
1984     switch (Op.getOpcode()) {
1985     case ISD::SRA:
1986       Result = DAG.getNode(HexagonISD::VSRAW, dl, VT, V3, CommonSplat);
1987       break;
1988     case ISD::SHL:
1989       Result = DAG.getNode(HexagonISD::VSHLW, dl, VT, V3, CommonSplat);
1990       break;
1991     case ISD::SRL:
1992       Result = DAG.getNode(HexagonISD::VSRLW, dl, VT, V3, CommonSplat);
1993       break;
1994     default:
1995       return SDValue();
1996     }
1997   } else {
1998     return SDValue();
1999   }
2000 
2001   return DAG.getNode(ISD::BITCAST, dl, VT, Result);
2002 }
2003 
2004 SDValue
2005 HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2006   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
2007   SDLoc dl(Op);
2008   EVT VT = Op.getValueType();
2009 
2010   unsigned Size = VT.getSizeInBits();
2011 
2012   // A vector larger than 64 bits cannot be represented in Hexagon.
2013   // Expand will split the vector.
2014   if (Size > 64)
2015     return SDValue();
2016 
2017   APInt APSplatBits, APSplatUndef;
2018   unsigned SplatBitSize;
2019   bool HasAnyUndefs;
2020   unsigned NElts = BVN->getNumOperands();
2021 
2022   // Try to generate a SPLAT instruction.
2023   if ((VT.getSimpleVT() == MVT::v4i8 || VT.getSimpleVT() == MVT::v4i16) &&
2024       (BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2025                             HasAnyUndefs, 0, true) && SplatBitSize <= 16)) {
2026     unsigned SplatBits = APSplatBits.getZExtValue();
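         // Sign-extend the SplatBitSize-bit splat value to 32 bits before
         // building the splat node.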
2027     int32_t SextVal = ((int32_t) (SplatBits << (32 - SplatBitSize)) >>
2028                        (32 - SplatBitSize));
2029     return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, MVT::i32));
2030   }
2031 
2032   // Try to generate COMBINE to build v2i32 vectors.
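       // e.g. (build_vector a, b) becomes (COMBINE b, a), with the first
       // COMBINE operand supplying the high word of the register pair.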
2033   if (VT.getSimpleVT() == MVT::v2i32) {
2034     SDValue V0 = BVN->getOperand(0);
2035     SDValue V1 = BVN->getOperand(1);
2036 
2037     if (V0.getOpcode() == ISD::UNDEF)
2038       V0 = DAG.getConstant(0, MVT::i32);
2039     if (V1.getOpcode() == ISD::UNDEF)
2040       V1 = DAG.getConstant(0, MVT::i32);
2041 
2042     ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0);
2043     ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(V1);
2044     // If the element isn't a constant, it is in a register:
2045     // generate a COMBINE Register Register instruction.
2046     if (!C0 || !C1)
2047       return DAG.getNode(HexagonISD::COMBINE, dl, VT, V1, V0);
2048 
2049     // If one of the operands is an 8 bit integer constant, generate
2050     // a COMBINE Immediate Immediate instruction.
2051     if (isInt<8>(C0->getSExtValue()) ||
2052         isInt<8>(C1->getSExtValue()))
2053       return DAG.getNode(HexagonISD::COMBINE, dl, VT, V1, V0);
2054   }
2055 
2056   // Try to generate a S2_packhl to build v2i16 vectors.
2057   if (VT.getSimpleVT() == MVT::v2i16) {
2058     for (unsigned i = 0, e = NElts; i != e; ++i) {
2059       if (BVN->getOperand(i).getOpcode() == ISD::UNDEF)
2060         continue;
2061       ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(BVN->getOperand(i));
2062       // If the element isn't a constant, it is in a register:
2063       // generate an S2_packhl instruction.
2064       if (!Cst) {
2065         SDValue pack = DAG.getNode(HexagonISD::PACKHL, dl, MVT::v4i16,
2066                                    BVN->getOperand(1), BVN->getOperand(0));
2067 
2068         return DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::v2i16,
2069                                           pack);
2070       }
2071     }
2072   }
2073 
2074   // In the general case, generate a CONST32 or a CONST64 for constant vectors,
2075   // and insert_vector_elt for all the other cases.
2076   uint64_t Res = 0;
2077   unsigned EltSize = Size / NElts;
2078   SDValue ConstVal;
2079   uint64_t Mask = ~uint64_t(0ULL) >> (64 - EltSize);
2080   bool HasNonConstantElements = false;
2081 
2082   for (unsigned i = 0, e = NElts; i != e; ++i) {
2083     // LLVM's BUILD_VECTOR operands are in little-endian order, whereas
2084     // Hexagon's combine, const64, etc. are big-endian.
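         // e.g. for v4i16 <a, b, c, d>, d is packed into the most significant
         // halfword of Res and a into the least significant one.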
2085     unsigned OpIdx = NElts - i - 1;
2086     SDValue Operand = BVN->getOperand(OpIdx);
2087     if (Operand.getOpcode() == ISD::UNDEF)
2088       continue;
2089 
2090     int64_t Val = 0;
2091     if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Operand))
2092       Val = Cst->getSExtValue();
2093     else
2094       HasNonConstantElements = true;
2095 
2096     Val &= Mask;
2097     Res = (Res << EltSize) | Val;
2098   }
2099 
2100   if (Size == 64)
2101     ConstVal = DAG.getConstant(Res, MVT::i64);
2102   else
2103     ConstVal = DAG.getConstant(Res, MVT::i32);
2104 
2105   // When there are non constant operands, add them with INSERT_VECTOR_ELT to
2106   // ConstVal, the constant part of the vector.
2107   if (HasNonConstantElements) {
2108     EVT EltVT = VT.getVectorElementType();
2109     SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), MVT::i64);
2110     SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2111                                   DAG.getConstant(32, MVT::i64));
2112 
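         // Each INSERT_riv/INSERT_rdv below takes one i64 control operand: the
         // field width in the upper 32 bits and the bit offset in the lower 32.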
2113     for (unsigned i = 0, e = NElts; i != e; ++i) {
2114       // LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon
2115       // is Big Endian.
2116       unsigned OpIdx = NElts - i - 1;
2117       SDValue Operand = BVN->getOperand(OpIdx);
2118       if (isa<ConstantSDNode>(Operand))
2119         // This operand is already in ConstVal.
2120         continue;
2121 
2122       if (VT.getSizeInBits() == 64 &&
2123           Operand.getValueType().getSizeInBits() == 32) {
2124         SDValue C = DAG.getConstant(0, MVT::i32);
2125         Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
2126       }
2127 
2128       SDValue Idx = DAG.getConstant(OpIdx, MVT::i64);
2129       SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
2130       SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2131       const SDValue Ops[] = {ConstVal, Operand, Combined};
2132 
2133       if (VT.getSizeInBits() == 32)
2134         ConstVal = DAG.getNode(HexagonISD::INSERT_riv, dl, MVT::i32, Ops);
2135       else
2136         ConstVal = DAG.getNode(HexagonISD::INSERT_rdv, dl, MVT::i64, Ops);
2137     }
2138   }
2139 
2140   return DAG.getNode(ISD::BITCAST, dl, VT, ConstVal);
2141 }
2142 
2143 SDValue
2144 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2145                                            SelectionDAG &DAG) const {
2146   SDLoc dl(Op);
2147   EVT VT = Op.getValueType();
2148   unsigned NElts = Op.getNumOperands();
2149   SDValue Vec = Op.getOperand(0);
2150   EVT VecVT = Vec.getValueType();
2151   SDValue Width = DAG.getConstant(VecVT.getSizeInBits(), MVT::i64);
2152   SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2153                                 DAG.getConstant(32, MVT::i64));
2154   SDValue ConstVal = DAG.getConstant(0, MVT::i64);
2155 
2156   ConstantSDNode *W = dyn_cast<ConstantSDNode>(Width);
2157   ConstantSDNode *S = dyn_cast<ConstantSDNode>(Shifted);
2158 
2159   if ((VecVT.getSimpleVT() == MVT::v2i16) && (NElts == 2) && W && S) {
2160     if ((W->getZExtValue() == 32) && ((S->getZExtValue() >> 32) == 32)) {
2161       // We are trying to concat two v2i16 to a single v4i16.
2162       SDValue Vec0 = Op.getOperand(1);
2163       SDValue Combined  = DAG.getNode(HexagonISD::COMBINE, dl, VT, Vec0, Vec);
2164       return DAG.getNode(ISD::BITCAST, dl, VT, Combined);
2165     }
2166   }
2167 
2168   if ((VecVT.getSimpleVT() == MVT::v4i8) && (NElts == 2) && W && S) {
2169     if ((W->getZExtValue() == 32) && ((S->getZExtValue() >> 32) == 32)) {
2170       // We are trying to concat two v4i8 to a single v8i8.
2171       SDValue Vec0 = Op.getOperand(1);
2172       SDValue Combined  = DAG.getNode(HexagonISD::COMBINE, dl, VT, Vec0, Vec);
2173       return DAG.getNode(ISD::BITCAST, dl, VT, Combined);
2174     }
2175   }
2176 
2177   for (unsigned i = 0, e = NElts; i != e; ++i) {
2178     unsigned OpIdx = NElts - i - 1;
2179     SDValue Operand = Op.getOperand(OpIdx);
2180 
2181     if (VT.getSizeInBits() == 64 &&
2182         Operand.getValueType().getSizeInBits() == 32) {
2183       SDValue C = DAG.getConstant(0, MVT::i32);
2184       Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
2185     }
2186 
2187     SDValue Idx = DAG.getConstant(OpIdx, MVT::i64);
2188     SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
2189     SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2190     const SDValue Ops[] = {ConstVal, Operand, Combined};
2191 
2192     if (VT.getSizeInBits() == 32)
2193       ConstVal = DAG.getNode(HexagonISD::INSERT_riv, dl, MVT::i32, Ops);
2194     else
2195       ConstVal = DAG.getNode(HexagonISD::INSERT_rdv, dl, MVT::i64, Ops);
2196   }
2197 
2198   return DAG.getNode(ISD::BITCAST, dl, VT, ConstVal);
2199 }
2200 
2201 SDValue
2202 HexagonTargetLowering::LowerEXTRACT_VECTOR(SDValue Op,
2203                                            SelectionDAG &DAG) const {
2204   EVT VT = Op.getValueType();
2205   int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
2206   SDLoc dl(Op);
2207   SDValue Idx = Op.getOperand(1);
2208   SDValue Vec = Op.getOperand(0);
2209   EVT VecVT = Vec.getValueType();
2210   EVT EltVT = VecVT.getVectorElementType();
2211   int EltSize = EltVT.getSizeInBits();
2212   SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT ?
2213                                   EltSize : VTN * EltSize, MVT::i64);
2214 
2215   // Constant element number.
2216   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Idx)) {
2217     SDValue Offset = DAG.getConstant(C->getZExtValue() * EltSize, MVT::i32);
2218     const SDValue Ops[] = {Vec, Width, Offset};
2219 
2220     ConstantSDNode *W = dyn_cast<ConstantSDNode>(Width);
2221     assert(W && "Non constant width in LowerEXTRACT_VECTOR");
2222 
2223     SDValue N;
2224     // For certain extracts, it is a simple _hi/_lo subreg.
2225     if (VecVT.getSimpleVT() == MVT::v2i32) {
2226       // v2i32 -> i32 vselect.
2227       if (C->getZExtValue() == 0)
2228         N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl,
2229                                        MVT::i32, Vec);
2230       else if (C->getZExtValue() == 1)
2231         N = DAG.getTargetExtractSubreg(Hexagon::subreg_hireg, dl,
2232                                        MVT::i32, Vec);
2233       else
2234         llvm_unreachable("Bad offset");
2235     } else if ((VecVT.getSimpleVT() == MVT::v4i16) &&
2236                (W->getZExtValue() == 32)) {
2237       // v4i16 -> v2i16/i32 vselect.
2238       if (C->getZExtValue() == 0)
2239         N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl,
2240                                        MVT::i32, Vec);
2241       else if (C->getZExtValue() == 2)
2242         N = DAG.getTargetExtractSubreg(Hexagon::subreg_hireg, dl,
2243                                        MVT::i32, Vec);
2244       else
2245         llvm_unreachable("Bad offset");
2246     }  else if ((VecVT.getSimpleVT() == MVT::v8i8) &&
2247                (W->getZExtValue() == 32)) {
2248       // v8i8 -> v4i8/i32 vselect.
2249       if (C->getZExtValue() == 0)
2250         N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl,
2251                                        MVT::i32, Vec);
2252       else if (C->getZExtValue() == 4)
2253         N = DAG.getTargetExtractSubreg(Hexagon::subreg_hireg, dl,
2254                                        MVT::i32, Vec);
2255       else
2256         llvm_unreachable("Bad offset");
2257     } else if (VecVT.getSizeInBits() == 32) {
2258         N = DAG.getNode(HexagonISD::EXTRACTU_ri, dl, MVT::i32, Ops);
2259     } else {
2260       N = DAG.getNode(HexagonISD::EXTRACTU_rd, dl, MVT::i64, Ops);
2261       if (VT.getSizeInBits() == 32)
2262         N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::i32, N);
2263     }
2264 
2265     return DAG.getNode(ISD::BITCAST, dl, VT, N);
2266   }
2267 
2268   // Variable element number.
2269   SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
2270                                DAG.getConstant(EltSize, MVT::i32));
2271   SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2272                                 DAG.getConstant(32, MVT::i64));
2273   SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
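       // EXTRACTU_riv/EXTRACTU_rdv take the field width in the upper 32 bits
       // and the bit offset in the lower 32 bits of this i64 operand.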
2274 
2275   const SDValue Ops[] = {Vec, Combined};
2276 
2277   SDValue N;
2278   if (VecVT.getSizeInBits() == 32) {
2279     N = DAG.getNode(HexagonISD::EXTRACTU_riv, dl, MVT::i32, Ops);
2280   } else {
2281     N = DAG.getNode(HexagonISD::EXTRACTU_rdv, dl, MVT::i64, Ops);
2282     if (VT.getSizeInBits() == 32)
2283       N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::i32, N);
2284   }
2285   return DAG.getNode(ISD::BITCAST, dl, VT, N);
2286 }
2287 
2288 SDValue
2289 HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,
2290                                           SelectionDAG &DAG) const {
2291   EVT VT = Op.getValueType();
2292   int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
2293   SDLoc dl(Op);
2294   SDValue Vec = Op.getOperand(0);
2295   SDValue Val = Op.getOperand(1);
2296   SDValue Idx = Op.getOperand(2);
2297   EVT VecVT = Vec.getValueType();
2298   EVT EltVT = VecVT.getVectorElementType();
2299   int EltSize = EltVT.getSizeInBits();
2300   SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::INSERT_VECTOR_ELT ?
2301                                   EltSize : VTN * EltSize, MVT::i64);
2302 
2303   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Idx)) {
2304     SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, MVT::i32);
2305     const SDValue Ops[] = {Vec, Val, Width, Offset};
2306 
2307     SDValue N;
2308     if (VT.getSizeInBits() == 32)
2309       N = DAG.getNode(HexagonISD::INSERT_ri, dl, MVT::i32, Ops);
2310     else
2311       N = DAG.getNode(HexagonISD::INSERT_rd, dl, MVT::i64, Ops);
2312 
2313     return DAG.getNode(ISD::BITCAST, dl, VT, N);
2314   }
2315 
2316   // Variable element number.
2317   SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
2318                                DAG.getConstant(EltSize, MVT::i32));
2319   SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2320                                 DAG.getConstant(32, MVT::i64));
2321   SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2322 
2323   if (VT.getSizeInBits() == 64 &&
2324       Val.getValueType().getSizeInBits() == 32) {
2325     SDValue C = DAG.getConstant(0, MVT::i32);
2326     Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);
2327   }
2328 
2329   const SDValue Ops[] = {Vec, Val, Combined};
2330 
2331   SDValue N;
2332   if (VT.getSizeInBits() == 32)
2333     N = DAG.getNode(HexagonISD::INSERT_riv, dl, MVT::i32, Ops);
2334   else
2335     N = DAG.getNode(HexagonISD::INSERT_rdv, dl, MVT::i64, Ops);
2336 
2337   return DAG.getNode(ISD::BITCAST, dl, VT, N);
2338 }
2339 
2340 bool
2341 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2342   // Assuming the caller does not have either a signext or zeroext modifier, and
2343   // only one value is accepted, any reasonable truncation is allowed.
2344   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2345     return false;
2346 
2347   // FIXME: in principle up to 64-bit could be made safe, but it would be very
2348   // fragile at the moment: any support for multiple value returns would be
2349   // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2350   return Ty1->getPrimitiveSizeInBits() <= 32;
2351 }
2352 
2353 SDValue
2354 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
2355   SDValue Chain     = Op.getOperand(0);
2356   SDValue Offset    = Op.getOperand(1);
2357   SDValue Handler   = Op.getOperand(2);
2358   SDLoc dl(Op);
2359 
2360   // Mark function as containing a call to EH_RETURN.
2361   HexagonMachineFunctionInfo *FuncInfo =
2362     DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
2363   FuncInfo->setHasEHReturn();
2364 
2365   unsigned OffsetReg = Hexagon::R28;
2366 
2367   SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
2368                                   DAG.getRegister(Hexagon::R30, getPointerTy()),
2369                                   DAG.getIntPtrConstant(4));
2370   Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
2371                        false, false, 0);
2372   Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
2373 
2374   // Not needed; we already use it as an explicit input to EH_RETURN.
2375   // MF.getRegInfo().addLiveOut(OffsetReg);
2376 
2377   return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
2378 }
2379 
2380 SDValue
2381 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2382   switch (Op.getOpcode()) {
2383     default: llvm_unreachable("Should not custom lower this!");
2384     case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, DAG);
2385     case ISD::INSERT_SUBVECTOR:   return LowerINSERT_VECTOR(Op, DAG);
2386     case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR(Op, DAG);
2387     case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_VECTOR(Op, DAG);
2388     case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR(Op, DAG);
2389     case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
2390     case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
2391     case ISD::SRA:
2392     case ISD::SHL:
2393     case ISD::SRL:
2394       return LowerVECTOR_SHIFT(Op, DAG);
2395     case ISD::ConstantPool:
2396       return LowerConstantPool(Op, DAG);
2397     case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
2398       // Frame & Return address.  Currently unimplemented.
2399     case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
2400     case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
2401     case ISD::GlobalTLSAddress:
2402                           llvm_unreachable("TLS not implemented for Hexagon.");
2403     case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
2404     case ISD::GlobalAddress:      return LowerGLOBALADDRESS(Op, DAG);
2405     case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
2406     case ISD::VASTART:            return LowerVASTART(Op, DAG);
2407     case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
2408     // Custom lower some vector loads.
2409     case ISD::LOAD:               return LowerLOAD(Op, DAG);
2410 
2411     case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
2412     case ISD::SELECT:             return Op;
2413     case ISD::SETCC:              return LowerSETCC(Op, DAG);
2414     case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
2415     case ISD::CTPOP:              return LowerCTPOP(Op, DAG);
2416     case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2417     case ISD::INLINEASM:          return LowerINLINEASM(Op, DAG);
2418 
2419   }
2420 }
2421 
2422 
2423 
2424 //===----------------------------------------------------------------------===//
2425 //                           Hexagon Scheduler Hooks
2426 //===----------------------------------------------------------------------===//
2427 MachineBasicBlock *
2428 HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
2429                                                    MachineBasicBlock *BB)
2430 const {
2431   switch (MI->getOpcode()) {
2432     case Hexagon::ADJDYNALLOC: {
2433       MachineFunction *MF = BB->getParent();
2434       HexagonMachineFunctionInfo *FuncInfo =
2435         MF->getInfo<HexagonMachineFunctionInfo>();
2436       FuncInfo->addAllocaAdjustInst(MI);
2437       return BB;
2438     }
2439     default: llvm_unreachable("Unexpected instr type to insert");
2440   } // switch
2441 }
2442 
2443 //===----------------------------------------------------------------------===//
2444 // Inline Assembly Support
2445 //===----------------------------------------------------------------------===//
2446 
2447 std::pair<unsigned, const TargetRegisterClass *>
2448 HexagonTargetLowering::getRegForInlineAsmConstraint(
2449     const TargetRegisterInfo *TRI, const std::string &Constraint,
2450     MVT VT) const {
2451   if (Constraint.size() == 1) {
2452     switch (Constraint[0]) {
2453     case 'r':   // R0-R31
2454        switch (VT.SimpleTy) {
2455        default:
2456          llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
2457        case MVT::i32:
2458        case MVT::i16:
2459        case MVT::i8:
2460        case MVT::f32:
2461          return std::make_pair(0U, &Hexagon::IntRegsRegClass);
2462        case MVT::i64:
2463        case MVT::f64:
2464          return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
2465       }
2466     default:
2467       llvm_unreachable("Unknown asm register class");
2468     }
2469   }
2470 
2471   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2472 }
2473 
2474 /// isFPImmLegal - Returns true if the target can instruction select the
2475 /// specified FP immediate natively. If false, the legalizer will
2476 /// materialize the FP immediate as a load from a constant pool.
2477 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
2478   return Subtarget->hasV5TOps();
2479 }
2480 
2481 /// isLegalAddressingMode - Return true if the addressing mode represented by
2482 /// AM is legal for this target, for a load/store of the specified type.
2483 bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
2484                                                   Type *Ty) const {
2485   // Reject base offsets that fall outside (-(1 << 13), (1 << 13) - 1).
2486   if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
2487     return false;
2488   }
2489 
2490   // No global is ever allowed as a base.
2491   if (AM.BaseGV) {
2492     return false;
2493   }
2494 
2495   int Scale = AM.Scale;
2496   if (Scale < 0) Scale = -Scale;
2497   switch (Scale) {
2498   case 0:  // No scale reg, "r+i", "r", or just "i".
2499     break;
2500   default: // No scaled addressing mode.
2501     return false;
2502   }
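       // Accepted modes are therefore "r", "r+i" (with the offset bound above),
       // and plain "i"; any scaled form is rejected.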
2503   return true;
2504 }
2505 
2506 /// isLegalICmpImmediate - Return true if the specified immediate is legal
2507 /// icmp immediate, that is the target has icmp instructions which can compare
2508 /// a register against the immediate without having to materialize the
2509 /// immediate into a register.
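     // The range below corresponds to a signed 10-bit immediate, which is what
     // the Hexagon compare-with-immediate forms are assumed to accept here.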
2510 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
2511   return Imm >= -512 && Imm <= 511;
2512 }
2513 
2514 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2515 /// for tail call optimization. Targets which want to do tail call
2516 /// optimization should implement this function.
2517 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
2518                                  SDValue Callee,
2519                                  CallingConv::ID CalleeCC,
2520                                  bool isVarArg,
2521                                  bool isCalleeStructRet,
2522                                  bool isCallerStructRet,
2523                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
2524                                  const SmallVectorImpl<SDValue> &OutVals,
2525                                  const SmallVectorImpl<ISD::InputArg> &Ins,
2526                                  SelectionDAG& DAG) const {
2527   const Function *CallerF = DAG.getMachineFunction().getFunction();
2528   CallingConv::ID CallerCC = CallerF->getCallingConv();
2529   bool CCMatch = CallerCC == CalleeCC;
2530 
2531   // ***************************************************************************
2532   //  Look for obvious safe cases to perform tail call optimization that do not
2533   //  require ABI changes.
2534   // ***************************************************************************
2535 
2536   // If this is a tail call via a function pointer, then don't do it!
2537   if (!(dyn_cast<GlobalAddressSDNode>(Callee))
2538       && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
2539     return false;
2540   }
2541 
2542   // Do not optimize if the calling conventions do not match.
2543   if (!CCMatch)
2544     return false;
2545 
2546   // Do not tail call optimize vararg calls.
2547   if (isVarArg)
2548     return false;
2549 
2550   // Also avoid tail call optimization if either caller or callee uses struct
2551   // return semantics.
2552   if (isCalleeStructRet || isCallerStructRet)
2553     return false;
2554 
2555   // In addition to the cases above, we should also disable tail call
2556   // optimization if the calling convention requires that at least one
2557   // outgoing argument be passed on the stack. We cannot check that here
2558   // because at this point that information is not available.
2559   return true;
2560 }
2561 
2562 // Return true when the given node fits in a positive half word.
2563 bool llvm::isPositiveHalfWord(SDNode *N) {
2564   ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
2565   if (CN && CN->getSExtValue() > 0 && isInt<16>(CN->getSExtValue()))
2566     return true;
2567 
2568   switch (N->getOpcode()) {
2569   default:
2570     return false;
2571   case ISD::SIGN_EXTEND_INREG:
2572     return true;
2573   }
2574 }
2575