//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;
  class ARMSubtarget;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit Unsigned Accumulate Multiply
      SMLAL,        // 64-bit Signed Accumulate Multiply

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }

  /// Define some predicates that are used for node matching.
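  // Illustrative note (assumed semantics, not spelled out in this header):
  // isBitFieldInvertedMask is expected to accept values that are all-ones
  // except for a single contiguous run of zeros, e.g. 0xffff00ff or
  // 0x0000ffff (the mask shapes a BFI/BFC bit-field insert can realize),
  // and to reject values with more than one zero run, such as 0xff00ff00.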
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(const TargetMachine &TM,
                               const ARMSubtarget &STI);

    unsigned getJumpTableEncoding() const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;

    MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const override;

    void AdjustInstrPostInstrSelection(MachineInstr *MI,
                                       SDNode *Node) const override;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

    /// allowsMisalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                        unsigned Align,
                                        bool *Fast) const override;

    EVT getOptimalMemOpType(uint64_t Size,
                            unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset,
                            bool MemcpyStrSrc,
                            MachineFunction &MF) const override;

    using TargetLowering::isZExtFree;
    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                    SDValue &Offset, ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) const override;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType
      getConstraintType(const std::string &Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 const std::string &Constraint,
                                 MVT VT) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned getInlineAsmMemConstraint(
        const std::string &ConstraintCode) const override {
      // FIXME: Map different constraints differently.
      return InlineAsm::Constraint_m;
    }

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    const TargetRegisterClass *getRegClassFor(MVT VT) const override;

    /// Returns true if a cast between SrcAS and DestAS is a noop.
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
      // Addrspacecasts are always noops.
      return true;
    }

    bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                unsigned &PrefAlign) const override;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    bool
    isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

    bool hasLoadLinkedStoreConditional() const override;
    Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                          AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                Value *Addr, AtomicOrdering Ord) const override;

    Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                  bool IsStore, bool IsLoad) const override;
    Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                   bool IsStore, bool IsLoad) const override;

    bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicRMWExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

    bool useLoadStackGuardNode() const override;

    bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                   unsigned &Cost) const override;

  protected:
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                              SDValue &ARMcc) const;

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;

    CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                            bool isVarArg) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

    unsigned getRegisterByName(const char* RegName, EVT VT) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that there
    /// would be a gain or that the gain would be worthwhile enough to risk
    /// correctness bugs.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const override;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       int ArgOffset,
                       unsigned ArgSize) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              unsigned TotalArgRegsSaveSize,
                              bool ForceMutable = false) const;

    SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const override;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    void HandleByVal(CCState *, unsigned &, unsigned) const override;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const override;

    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(CallInst *CI) const override;

    SDValue getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                    SDValue ARMcc, SDValue CCR, SDValue Cmp,
                    SelectionDAG &DAG) const;
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitLowered__chkstk(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H