//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

// Return true when the given node fits in a positive half word.
bool isPositiveHalfWord(SDNode *N);
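// Illustrative sketch only (the authoritative check lives in
// HexagonISelLowering.cpp): the predicate is meant to be queried on DAG nodes
// before a half-word form is chosen, e.g.
//   if (isPositiveHalfWord(Op.getNode()))
//     ... // Op can be treated as a positive 16-bit half word.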

  namespace HexagonISD {
    enum NodeType : unsigned {
      OP_BEGIN = ISD::BUILTIN_OP_END,

      CONST32 = OP_BEGIN,
      CONST32_GP,  // For marking data present in GP.
      FCONST32,
      ALLOCA,
      ARGEXTEND,

      AT_GOT,      // Index in GOT.
      AT_PCREL,    // Offset relative to PC.

      CALLv3,      // A V3+ call instruction.
      CALLv3nr,    // A V3+ call instruction that doesn't return.
      CALLR,

      RET_FLAG,    // Return with a flag operand.
      BARRIER,     // Memory barrier.
      JT,          // Jump table.
      CP,          // Constant pool.

      POPCOUNT,
      COMBINE,
      PACKHL,
      VSPLATB,
      VSPLATH,
      SHUFFEB,
      SHUFFEH,
      SHUFFOB,
      SHUFFOH,
      VSXTBH,
      VSXTBW,
      VSRAW,
      VSRAH,
      VSRLW,
      VSRLH,
      VSHLW,
      VSHLH,
      VCMPBEQ,
      VCMPBGT,
      VCMPBGTU,
      VCMPHEQ,
      VCMPHGT,
      VCMPHGTU,
      VCMPWEQ,
      VCMPWGT,
      VCMPWGTU,

      INSERT,
      INSERTRP,
      EXTRACTU,
      EXTRACTURP,
      VCOMBINE,
      TC_RETURN,
      EH_RETURN,
      DCFETCH,

      OP_END
    };
  }

  class HexagonSubtarget;

  class HexagonTargetLowering : public TargetLowering {
    int VarArgsFrameOffset;   // Frame offset to start of varargs area.

    bool CanReturnSmallStruct(const Function *CalleeFn, unsigned &RetSize)
        const;
    void promoteLdStType(EVT VT, EVT PromotedLdStVT);
    const HexagonTargetMachine &HTM;
    const HexagonSubtarget &Subtarget;

  public:
    explicit HexagonTargetLowering(const TargetMachine &TM,
                                   const HexagonSubtarget &ST);

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
        CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
        bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<SDValue> &OutVals,
        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    // Should we expand the build vector with shuffles?
    bool shouldExpandBuildVectorWithShuffles(EVT VT,
        unsigned DefinedValues) const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
    const char *getTargetNodeName(unsigned Opcode) const override;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
        bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
        SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
        CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
        const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const;

    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
        bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
        SelectionDAG &DAG) const override;

    bool mayBeEmittedAsTailCall(CallInst *CI) const override;
    MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
        MachineBasicBlock *BB) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override {
      return Hexagon::R0;
    }

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
      return Hexagon::R1;
    }

    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                           EVT VT) const override {
      if (!VT.isVector())
        return MVT::i1;
      else
        return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
    }
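    // Illustrative examples of the rule above (not additional API): a scalar
    // i32 compare yields an i1 result, while a compare of two v4i16 vectors
    // yields a v4i1 result with one predicate bit per element.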

    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                    SDValue &Base, SDValue &Offset,
                                    ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "v")
        return InlineAsm::Constraint_v;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    // Intrinsics
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    /// The type may be VoidTy, in which case only return true if the addressing
    /// mode is legal for a load/store of any legal type.
    /// TODO: Handle pre/postinc as well.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS) const override;
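    // Illustrative sketch only (the definitive checks are in the
    // implementation): a simple base-plus-immediate form such as [r0+#8] is
    // the typical legal case on Hexagon; optimizers such as LSR query this
    // hook before committing to an addressing expression.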
    /// Return true if folding a constant offset with the given GlobalAddress
    /// is legal.  It is frequently not legal in PIC relocation models.
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
                                     const override;

    // Handling of atomic RMW instructions.
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
        AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
        Value *Addr, AtomicOrdering Ord) const override;
    AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
      return AtomicExpansionKind::LLSC;
    }
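    // Sketch of the intended expansion (for illustration; AtomicExpandPass
    // owns the actual rewrite): returning LLSC above asks that an atomicrmw
    // be lowered to a load-linked/store-conditional retry loop built from the
    // emitLoadLinked and emitStoreConditional hooks declared above.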

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
        const override;
  };
} // end namespace llvm

#endif    // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H