Lines matching refs:MVT in AArch64FastISel.cpp (llvm/lib/Target/AArch64)
Each entry below shows the file line number, the matching source line, and, for lines inside a function body, the enclosing function.
140 bool isTypeLegal(Type *Ty, MVT &VT);
141 bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
145 bool simplifyAddress(Address &Addr, MVT VT);
154 bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
159 unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
162 unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
165 unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
168 unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
173 unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
182 bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
183 bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
184 bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
185 unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
187 bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
189 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
190 unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
191 unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
194 unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
195 unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
198 unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
200 unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
204 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
206 unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
208 unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
211 unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
212 unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
214 unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
216 unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
218 unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
220 unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
222 unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
224 unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
226 unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
228 unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
231 unsigned materializeInt(const ConstantInt *CI, MVT VT);
232 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
238 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
240 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
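The suffixes in these declarations follow the AArch64 operand-form convention: _rr is register/register, _ri register/immediate, _rs register/shifted-register, and _rx register/extended-register. A minimal sketch of how such an emitter family is typically driven, condensed from the emitAddSub definition at file line 1089 below (hypothetical helper; the real code also folds shifts, extends, and negated operands):

    // Hypothetical, condensed dispatcher: prefer the immediate form, then
    // fall back to the plain register/register form.
    unsigned emitBinOpSketch(bool UseAdd, MVT RetVT, unsigned LHSReg,
                             const Value *RHS) {
      if (const auto *C = dyn_cast<ConstantInt>(RHS))
        if (unsigned Reg = emitAddSub_ri(UseAdd, RetVT, LHSReg,
                                         /*LHSIsKill=*/false, C->getZExtValue()))
          return Reg; // the immediate fit the encoding
      unsigned RHSReg = getRegForValue(RHS);
      if (!RHSReg)
        return 0;
      return emitAddSub_rr(UseAdd, RetVT, LHSReg, /*LHSIsKill=*/false,
                           RHSReg, /*RHSIsKill=*/true);
    }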
286 static unsigned getImplicitScaleFactor(MVT VT) { in getImplicitScaleFactor()
290 case MVT::i1: // fall-through in getImplicitScaleFactor()
291 case MVT::i8: in getImplicitScaleFactor()
293 case MVT::i16: in getImplicitScaleFactor()
295 case MVT::i32: // fall-through in getImplicitScaleFactor()
296 case MVT::f32: in getImplicitScaleFactor()
298 case MVT::i64: // fall-through in getImplicitScaleFactor()
299 case MVT::f64: in getImplicitScaleFactor()
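Only the case labels of getImplicitScaleFactor match refs:MVT above (file lines 286-299). Reconstructed with the return values implied by the byte widths of the listed types, the helper reads:

    // Scale factor for the register-offset addressing modes: the access size
    // in bytes, or 0 for types those modes do not handle. (Reconstruction;
    // the values follow directly from the type sizes.)
    static unsigned getImplicitScaleFactor(MVT VT) {
      switch (VT.SimpleTy) {
      default:
        return 0; // invalid
      case MVT::i1: // fall-through
      case MVT::i8:
        return 1;
      case MVT::i16:
        return 2;
      case MVT::i32: // fall-through
      case MVT::f32:
        return 4;
      case MVT::i64: // fall-through
      case MVT::f64:
        return 8;
      }
    }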
313 assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i64 && in fastMaterializeAlloca()
336 unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) { in materializeInt()
337 if (VT > MVT::i64) in materializeInt()
344 const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass in materializeInt()
346 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in materializeInt()
353 unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) { in materializeFP()
359 if (VT != MVT::f32 && VT != MVT::f64) in materializeFP()
363 bool Is64Bit = (VT == MVT::f64); in materializeFP()
467 MVT VT = CEVT.getSimpleVT(); in fastMaterializeConstant()
482 MVT VT; in fastMaterializeFloatZero()
486 if (VT != MVT::f32 && VT != MVT::f64) in fastMaterializeFloatZero()
489 bool Is64Bit = (VT == MVT::f64); in fastMaterializeFloatZero()
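Both materializers special-case zero so that no literal load is emitted: integer zero is a COPY from WZR/XZR (file lines 344-346), and fastMaterializeFloatZero (482-489) moves the integer zero register straight into an FP register. A hedged sketch of the FP path, assuming the usual GPR-to-FPR FMOV opcodes:

    // Sketch (assumption): +0.0 is materialized by moving WZR/XZR into an FP
    // register; FMOVWSr/FMOVXDr are the 32-/64-bit GPR->FPR moves.
    bool Is64Bit = (VT == MVT::f64);
    unsigned ZeroReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
    unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
    return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZeroReg, /*IsKill=*/true);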
704 Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill, in computeAddress()
802 Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill, in computeAddress()
909 bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) { in isTypeLegal()
913 if (evt == MVT::Other || !evt.isSimple()) in isTypeLegal()
918 if (VT == MVT::f128) in isTypeLegal()
930 bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) { in isTypeSupported()
939 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) in isTypeSupported()
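The two predicates compose: isTypeLegal rejects non-simple types and f128, while isTypeSupported additionally admits i1/i8/i16 (file line 939), which the emitters widen themselves, and optionally vectors. A reconstruction consistent with the matched lines:

    // Reconstruction (hedged): accept legal types, plus the sub-word integer
    // types that the emitters legalize by sign-/zero-extension.
    bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {
      if (Ty->isVectorTy() && !IsVectorAllowed)
        return false;
      if (isTypeLegal(Ty, VT))
        return true;
      return VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16;
    }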
956 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) { in simplifyAddress()
1000 ResultReg = emitAddSub_rx(/*UseAdd=*/true, MVT::i64, Addr.getReg(), in simplifyAddress()
1005 ResultReg = emitAddSub_rs(/*UseAdd=*/true, MVT::i64, Addr.getReg(), in simplifyAddress()
1011 ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(), in simplifyAddress()
1015 ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(), in simplifyAddress()
1019 ResultReg = emitLSL_ri(MVT::i64, MVT::i64, Addr.getOffsetReg(), in simplifyAddress()
1037 ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), /*IsKill=*/false, Offset); in simplifyAddress()
1039 ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset); in simplifyAddress()
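When an offset cannot be folded into the addressing mode, simplifyAddress pre-computes it into the base register (file lines 1037-1039). A condensed sketch, assuming the Address setters used elsewhere in this file:

    // Sketch (assumption): fold base+offset into one register so the memory
    // instruction itself can use a zero immediate.
    if (Addr.getReg())
      ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), /*IsKill=*/false, Offset);
    else
      ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
    if (!ResultReg)
      return false;
    Addr.setReg(ResultReg);
    Addr.setOffset(0);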
1089 unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS, in emitAddSub()
1097 case MVT::i1: in emitAddSub()
1100 case MVT::i8: in emitAddSub()
1104 case MVT::i16: in emitAddSub()
1108 case MVT::i32: // fall-through in emitAddSub()
1109 case MVT::i64: in emitAddSub()
1112 MVT SrcVT = RetVT; in emitAddSub()
1113 RetVT.SimpleTy = std::max(RetVT.SimpleTy, MVT::i32); in emitAddSub()
1243 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_rr()
1249 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_rr()
1258 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_rr()
1277 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_ri()
1282 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_ri()
1300 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_ri()
1322 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_rs()
1330 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_rs()
1343 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_rs()
1363 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_rx()
1371 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_rx()
1383 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_rx()
1411 MVT VT = EVT.getSimpleVT(); in emitCmp()
1416 case MVT::i1: in emitCmp()
1417 case MVT::i8: in emitCmp()
1418 case MVT::i16: in emitCmp()
1419 case MVT::i32: in emitCmp()
1420 case MVT::i64: in emitCmp()
1422 case MVT::f32: in emitCmp()
1423 case MVT::f64: in emitCmp()
1428 bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, in emitICmp()
1434 bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, in emitICmp_ri()
1440 bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) { in emitFCmp()
1441 if (RetVT != MVT::f32 && RetVT != MVT::f64) in emitFCmp()
1457 unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri; in emitFCmp()
1468 unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr; in emitFCmp()
1475 unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS, in emitAdd()
1486 unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, in emitAdd_ri_()
1505 unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS, in emitSub()
1511 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg, in emitSubs_rr()
1518 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg, in emitSubs_rs()
1528 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT, in emitLogicalOp()
1603 MVT VT = std::max(MVT::i32, RetVT.SimpleTy); in emitLogicalOp()
1605 if (RetVT >= MVT::i8 && RetVT <= MVT::i16) { in emitLogicalOp()
1606 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff; in emitLogicalOp()
1607 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLogicalOp()
1612 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, in emitLogicalOp_ri()
1628 case MVT::i1: in emitLogicalOp_ri()
1629 case MVT::i8: in emitLogicalOp_ri()
1630 case MVT::i16: in emitLogicalOp_ri()
1631 case MVT::i32: { in emitLogicalOp_ri()
1638 case MVT::i64: in emitLogicalOp_ri()
1651 if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) { in emitLogicalOp_ri()
1652 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff; in emitLogicalOp_ri()
1653 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLogicalOp_ri()
1658 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, in emitLogicalOp_rs()
1679 case MVT::i1: in emitLogicalOp_rs()
1680 case MVT::i8: in emitLogicalOp_rs()
1681 case MVT::i16: in emitLogicalOp_rs()
1682 case MVT::i32: in emitLogicalOp_rs()
1686 case MVT::i64: in emitLogicalOp_rs()
1694 if (RetVT >= MVT::i8 && RetVT <= MVT::i16) { in emitLogicalOp_rs()
1695 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff; in emitLogicalOp_rs()
1696 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLogicalOp_rs()
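The AND against 0xff/0xffff recurs in all three emitLogicalOp* variants (1606-1607, 1651-1653, 1694-1696) and again in the shift emitters below, because AArch64 has no i8/i16 registers: the operation runs on full W registers and the result must be truncated back by hand. A standalone C++ illustration (not LLVM code) of why the shifted-register form needs it:

    #include <cstdint>
    // ORR with a shifted register can move bits above bit 7 into the result,
    // so an i8 result must be re-masked to preserve i8 semantics.
    uint32_t orr_lsl_i8(uint32_t a, uint32_t b, unsigned shift) {
      uint32_t wide = a | (b << shift); // ORRWrs on the 32-bit registers
      return wide & 0xff;               // ANDWri, the line-1696 pattern
    }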
1701 unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, in emitAnd_ri()
1706 unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr, in emitLoad()
1782 bool IsRet64Bit = RetVT == MVT::i64; in emitLoad()
1786 case MVT::i1: // Intentional fall-through. in emitLoad()
1787 case MVT::i8: in emitLoad()
1792 case MVT::i16: in emitLoad()
1797 case MVT::i32: in emitLoad()
1802 case MVT::i64: in emitLoad()
1806 case MVT::f32: in emitLoad()
1810 case MVT::f64: in emitLoad()
1823 if (VT == MVT::i1) { in emitLoad()
1824 unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1); in emitLoad()
1831 if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) { in emitLoad()
1844 MVT VT; in selectAddSub()
1870 MVT VT; in selectLogicalOp()
1899 MVT VT; in selectLoad()
1914 MVT RetVT = VT; in selectLoad()
1955 if (RetVT == MVT::i64 && VT <= MVT::i32) { in selectLoad()
1961 ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg, in selectLoad()
1993 bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr, in emitStore()
2036 case MVT::i1: VTIsi1 = true; in emitStore()
2037 case MVT::i8: Opc = OpcTable[Idx][0]; break; in emitStore()
2038 case MVT::i16: Opc = OpcTable[Idx][1]; break; in emitStore()
2039 case MVT::i32: Opc = OpcTable[Idx][2]; break; in emitStore()
2040 case MVT::i64: Opc = OpcTable[Idx][3]; break; in emitStore()
2041 case MVT::f32: Opc = OpcTable[Idx][4]; break; in emitStore()
2042 case MVT::f64: Opc = OpcTable[Idx][5]; break; in emitStore()
2047 unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1); in emitStore()
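MVT::i1 falls through to the i8 column of OpcTable: an i1 is stored as a byte after being masked down to bit 0 (file line 2047), unless the source is already the all-zero register. Condensed sketch of that guard:

    // Sketch (condensed from emitStore): only bit 0 may survive the byte
    // store; WZR is already zero and needs no mask.
    if (VTIsi1 && SrcReg != AArch64::WZR) {
      unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*IsKill=*/false, 1);
      assert(ANDReg && "Unexpected AND instruction emission failure.");
      SrcReg = ANDReg;
    }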
2062 MVT VT; in selectStore()
2076 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectStore()
2079 VT = MVT::getIntegerVT(VT.getSizeInBits()); in selectStore()
2080 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectStore()
2155 MVT VT; in emitCompareAndBranch()
2201 if (VT == MVT::i1) in emitCompareAndBranch()
2248 SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill, in emitCompareAndBranch()
2252 SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*IsZExt=*/true); in emitCompareAndBranch()
2435 ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1); in selectCmp()
2547 Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1); in optimizeSelect()
2558 MVT VT; in selectSelect()
2567 case MVT::i1: in selectSelect()
2568 case MVT::i8: in selectSelect()
2569 case MVT::i16: in selectSelect()
2570 case MVT::i32: in selectSelect()
2574 case MVT::i64: in selectSelect()
2578 case MVT::f32: in selectSelect()
2582 case MVT::f64: in selectSelect()
2720 MVT DestVT; in selectFPToInt()
2729 if (SrcVT == MVT::f128) in selectFPToInt()
2733 if (SrcVT == MVT::f64) { in selectFPToInt()
2735 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr; in selectFPToInt()
2737 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr; in selectFPToInt()
2740 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr; in selectFPToInt()
2742 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr; in selectFPToInt()
2745 DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass); in selectFPToInt()
2753 MVT DestVT; in selectIntToFP()
2756 assert ((DestVT == MVT::f32 || DestVT == MVT::f64) && in selectIntToFP()
2767 if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) { in selectIntToFP()
2769 emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed); in selectIntToFP()
2776 if (SrcVT == MVT::i64) { in selectIntToFP()
2778 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri; in selectIntToFP()
2780 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri; in selectIntToFP()
2783 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri; in selectIntToFP()
2785 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri; in selectIntToFP()
2827 MVT VT = ArgVT.getSimpleVT().SimpleTy; in fastLowerArguments()
2835 if (VT >= MVT::i1 && VT <= MVT::i64) in fastLowerArguments()
2837 else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() || in fastLowerArguments()
2865 MVT VT = TLI.getSimpleValueType(DL, Arg.getType()); in fastLowerArguments()
2868 if (VT >= MVT::i1 && VT <= MVT::i32) { in fastLowerArguments()
2871 VT = MVT::i32; in fastLowerArguments()
2872 } else if (VT == MVT::i64) { in fastLowerArguments()
2875 } else if (VT == MVT::f16) { in fastLowerArguments()
2878 } else if (VT == MVT::f32) { in fastLowerArguments()
2881 } else if ((VT == MVT::f64) || VT.is64BitVector()) { in fastLowerArguments()
2904 SmallVectorImpl<MVT> &OutVTs, in processCallArgs()
2922 MVT ArgVT = OutVTs[VA.getValNo()]; in processCallArgs()
2933 MVT DestVT = VA.getLocVT(); in processCallArgs()
2934 MVT SrcVT = ArgVT; in processCallArgs()
2943 MVT DestVT = VA.getLocVT(); in processCallArgs()
2944 MVT SrcVT = ArgVT; in processCallArgs()
2993 bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT, in finishCall()
3003 if (RetVT != MVT::isVoid) { in finishCall()
3013 MVT CopyVT = RVLocs[0].getValVT(); in finishCall()
3060 MVT RetVT; in fastLowerCall()
3062 RetVT = MVT::isVoid; in fastLowerCall()
3071 SmallVector<MVT, 16> OutVTs; in fastLowerCall()
3075 MVT VT; in fastLowerCall()
3077 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) in fastLowerCall()
3169 MVT VT; in tryEmitSmallMemCpy()
3172 VT = MVT::i64; in tryEmitSmallMemCpy()
3174 VT = MVT::i32; in tryEmitSmallMemCpy()
3176 VT = MVT::i16; in tryEmitSmallMemCpy()
3178 VT = MVT::i8; in tryEmitSmallMemCpy()
3183 VT = MVT::i32; in tryEmitSmallMemCpy()
3185 VT = MVT::i16; in tryEmitSmallMemCpy()
3187 VT = MVT::i8; in tryEmitSmallMemCpy()
3223 MVT RetVT; in foldXALUIntrinsic()
3230 if (RetVT != MVT::i32 && RetVT != MVT::i64) in foldXALUIntrinsic()
3387 MVT RetVT; in fastLowerIntrinsicCall()
3391 if (RetVT != MVT::f32 && RetVT != MVT::f64) in fastLowerIntrinsicCall()
3400 bool Is64Bit = RetVT == MVT::f64; in fastLowerIntrinsicCall()
3436 MVT VT; in fastLowerIntrinsicCall()
3444 case MVT::f32: in fastLowerIntrinsicCall()
3447 case MVT::f64: in fastLowerIntrinsicCall()
3469 MVT VT; in fastLowerIntrinsicCall()
3496 MVT VT; in fastLowerIntrinsicCall()
3500 if (VT != MVT::i32 && VT != MVT::i64) in fastLowerIntrinsicCall()
3563 if (VT == MVT::i32) { in fastLowerIntrinsicCall()
3564 MulReg = emitSMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill); in fastLowerIntrinsicCall()
3565 unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg, in fastLowerIntrinsicCall()
3574 assert(VT == MVT::i64 && "Unexpected value type."); in fastLowerIntrinsicCall()
3598 if (VT == MVT::i32) { in fastLowerIntrinsicCall()
3599 MulReg = emitUMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill); in fastLowerIntrinsicCall()
3600 emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg, in fastLowerIntrinsicCall()
3606 assert(VT == MVT::i64 && "Unexpected value type."); in fastLowerIntrinsicCall()
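For [su]mul.with.overflow on i32 the code widens to a 64-bit multiply: SMULL plus an LSR of the high half for the signed case (file lines 3564-3565), UMULL plus a SUBS against the high half for the unsigned case (3599-3600). Standalone C++ (not LLVM code) stating the conditions those instruction sequences test:

    #include <cstdint>
    bool smul32_overflows(int32_t a, int32_t b) {
      int64_t wide = (int64_t)a * (int64_t)b;  // SMULL Xd, Wn, Wm
      return wide != (int64_t)(int32_t)wide;   // high 33 bits must replicate bit 31
    }
    bool umul32_overflows(uint32_t a, uint32_t b) {
      uint64_t wide = (uint64_t)a * (uint64_t)b; // UMULL Xd, Wn, Wm
      return (wide >> 32) != 0;                  // the SUBS ..., lsr #32 flag check
    }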
3702 MVT RVVT = RVEVT.getSimpleVT(); in selectRet()
3703 if (RVVT == MVT::f128) in selectRet()
3706 MVT DestVT = VA.getValVT(); in selectRet()
3709 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) in selectRet()
3748 MVT SrcVT = SrcEVT.getSimpleVT(); in selectTrunc()
3749 MVT DestVT = DestEVT.getSimpleVT(); in selectTrunc()
3751 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 && in selectTrunc()
3752 SrcVT != MVT::i8) in selectTrunc()
3754 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 && in selectTrunc()
3755 DestVT != MVT::i1) in selectTrunc()
3769 if (SrcVT == MVT::i64) { in selectTrunc()
3775 case MVT::i1: in selectTrunc()
3778 case MVT::i8: in selectTrunc()
3781 case MVT::i16: in selectTrunc()
3786 unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill, in selectTrunc()
3789 ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask); in selectTrunc()
3802 unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) { in emiti1Ext()
3803 assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 || in emiti1Ext()
3804 DestVT == MVT::i64) && in emiti1Ext()
3807 if (DestVT == MVT::i8 || DestVT == MVT::i16) in emiti1Ext()
3808 DestVT = MVT::i32; in emiti1Ext()
3811 unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1); in emiti1Ext()
3813 if (DestVT == MVT::i64) { in emiti1Ext()
3826 if (DestVT == MVT::i64) { in emiti1Ext()
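In emiti1Ext the zero-extension path masks down to bit 0 (file line 3811); the sign-extension path instead replicates bit 0 across the register, which a single bitfield move can do. The two results in standalone C++:

    #include <cstdint>
    // What the i1 extensions compute, independent of the chosen opcodes.
    uint32_t zext_i1(uint32_t r) { return r & 1; }             // AND Wd, Wn, #1
    int32_t  sext_i1(uint32_t r) { return -(int32_t)(r & 1); } // 0 or all ones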
3835 unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, in emitMul_rr()
3840 case MVT::i8: in emitMul_rr()
3841 case MVT::i16: in emitMul_rr()
3842 case MVT::i32: in emitMul_rr()
3843 RetVT = MVT::i32; in emitMul_rr()
3845 case MVT::i64: in emitMul_rr()
3850 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitMul_rr()
3855 unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, in emitSMULL_rr()
3857 if (RetVT != MVT::i64) in emitSMULL_rr()
3865 unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, in emitUMULL_rr()
3867 if (RetVT != MVT::i64) in emitUMULL_rr()
3875 unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, in emitLSL_rr()
3882 case MVT::i8: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff; break; in emitLSL_rr()
3883 case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break; in emitLSL_rr()
3884 case MVT::i32: Opc = AArch64::LSLVWr; break; in emitLSL_rr()
3885 case MVT::i64: Opc = AArch64::LSLVXr; break; in emitLSL_rr()
3889 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitLSL_rr()
3891 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); in emitLSL_rr()
3897 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLSL_rr()
3901 unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0, in emitLSL_ri()
3906 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || in emitLSL_ri()
3907 SrcVT == MVT::i32 || SrcVT == MVT::i64) && in emitLSL_ri()
3909 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 || in emitLSL_ri()
3910 RetVT == MVT::i64) && "Unexpected return value type."); in emitLSL_ri()
3912 bool Is64Bit = (RetVT == MVT::i64); in emitLSL_ri()
3968 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) { in emitLSL_ri()
3981 unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, in emitLSR_rr()
3988 case MVT::i8: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff; break; in emitLSR_rr()
3989 case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break; in emitLSR_rr()
3990 case MVT::i32: Opc = AArch64::LSRVWr; break; in emitLSR_rr()
3991 case MVT::i64: Opc = AArch64::LSRVXr; break; in emitLSR_rr()
3995 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitLSR_rr()
3997 Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask); in emitLSR_rr()
3998 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); in emitLSR_rr()
4004 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLSR_rr()
4008 unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, in emitLSR_ri()
4013 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || in emitLSR_ri()
4014 SrcVT == MVT::i32 || SrcVT == MVT::i64) && in emitLSR_ri()
4016 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 || in emitLSR_ri()
4017 RetVT == MVT::i64) && "Unexpected return value type."); in emitLSR_ri()
4019 bool Is64Bit = (RetVT == MVT::i64); in emitLSR_ri()
4089 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) { in emitLSR_ri()
4102 unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, in emitASR_rr()
4109 case MVT::i8: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff; break; in emitASR_rr()
4110 case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break; in emitASR_rr()
4111 case MVT::i32: Opc = AArch64::ASRVWr; break; in emitASR_rr()
4112 case MVT::i64: Opc = AArch64::ASRVXr; break; in emitASR_rr()
4116 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitASR_rr()
4118 Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*IsZExt=*/false); in emitASR_rr()
4119 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); in emitASR_rr()
4125 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitASR_rr()
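Comparing the three variable-shift emitters for sub-word types: emitLSL_rr only re-masks the result (3891-3897), emitLSR_rr must zero-extend the value first as well (3997), and emitASR_rr must sign-extend it (4118), because LSLV/LSRV/ASRV shift full W registers. Standalone C++ for the i8 case, assuming shift amounts below 8 as LLVM IR guarantees:

    #include <cstdint>
    uint8_t lsl8(uint8_t v, uint32_t amt) {
      return (uint8_t)((uint32_t)v << (amt & 0xff));          // truncate after
    }
    uint8_t lsr8(uint8_t v, uint32_t amt) {
      return (uint8_t)(((uint32_t)v & 0xff) >> (amt & 0xff)); // zero-extend value first
    }
    uint8_t asr8(uint8_t v, uint32_t amt) {
      return (uint8_t)((int32_t)(int8_t)v >> (amt & 0xff));   // sign-extend value first
    }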
4129 unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, in emitASR_ri()
4134 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || in emitASR_ri()
4135 SrcVT == MVT::i32 || SrcVT == MVT::i64) && in emitASR_ri()
4137 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 || in emitASR_ri()
4138 RetVT == MVT::i64) && "Unexpected return value type."); in emitASR_ri()
4140 bool Is64Bit = (RetVT == MVT::i64); in emitASR_ri()
4198 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) { in emitASR_ri()
4211 unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, in emitIntExt()
4213 assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?"); in emitIntExt()
4219 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && in emitIntExt()
4220 (DestVT != MVT::i32) && (DestVT != MVT::i64)) || in emitIntExt()
4221 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && in emitIntExt()
4222 (SrcVT != MVT::i16) && (SrcVT != MVT::i32))) in emitIntExt()
4231 case MVT::i1: in emitIntExt()
4233 case MVT::i8: in emitIntExt()
4234 if (DestVT == MVT::i64) in emitIntExt()
4240 case MVT::i16: in emitIntExt()
4241 if (DestVT == MVT::i64) in emitIntExt()
4247 case MVT::i32: in emitIntExt()
4248 assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?"); in emitIntExt()
4255 if (DestVT == MVT::i8 || DestVT == MVT::i16) in emitIntExt()
4256 DestVT = MVT::i32; in emitIntExt()
4257 else if (DestVT == MVT::i64) { in emitIntExt()
4268 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitIntExt()
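The i32 case of emitIntExt (file lines 4247-4248) only ever widens to i64, and the zero-extending direction costs nothing: every write to a W register clears bits 63:32 of the X register, so the value is merely re-labelled. A hedged sketch of that branch:

    // Sketch (assumption, condensed from emitIntExt): i32->i64 zext is free.
    if (SrcVT == MVT::i32 && DestVT == MVT::i64 && IsZExt) {
      unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::SUBREG_TO_REG), ResultReg)
          .addImm(0)
          .addReg(SrcReg)
          .addImm(AArch64::sub_32);
      return ResultReg;
    }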
4320 bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT, in optimizeIntExtLoad()
4321 MVT SrcVT) { in optimizeIntExtLoad()
4349 if (RetVT != MVT::i64 || SrcVT > MVT::i32) { in optimizeIntExtLoad()
4376 MVT RetVT; in selectIntExt()
4377 MVT SrcVT; in selectIntExt()
4397 if (RetVT == MVT::i64 && SrcVT != MVT::i64) { in selectIntExt()
4432 MVT DestVT = DestEVT.getSimpleVT(); in selectRem()
4433 if (DestVT != MVT::i64 && DestVT != MVT::i32) in selectRem()
4437 bool Is64bit = (DestVT == MVT::i64); in selectRem()
4460 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in selectRem()
4474 MVT VT; in selectMul()
4491 MVT SrcVT = VT; in selectMul()
4495 MVT VT; in selectMul()
4504 MVT VT; in selectMul()
4547 MVT RetVT; in selectShift()
4557 MVT SrcVT = RetVT; in selectShift()
4562 MVT TmpVT; in selectShift()
4571 MVT TmpVT; in selectShift()
4636 MVT RetVT, SrcVT; in selectBitCast()
4644 if (RetVT == MVT::f32 && SrcVT == MVT::i32) in selectBitCast()
4646 else if (RetVT == MVT::f64 && SrcVT == MVT::i64) in selectBitCast()
4648 else if (RetVT == MVT::i32 && SrcVT == MVT::f32) in selectBitCast()
4650 else if (RetVT == MVT::i64 && SrcVT == MVT::f64) in selectBitCast()
4658 case MVT::i32: RC = &AArch64::GPR32RegClass; break; in selectBitCast()
4659 case MVT::i64: RC = &AArch64::GPR64RegClass; break; in selectBitCast()
4660 case MVT::f32: RC = &AArch64::FPR32RegClass; break; in selectBitCast()
4661 case MVT::f64: RC = &AArch64::FPR64RegClass; break; in selectBitCast()
4677 MVT RetVT; in selectFRem()
4685 case MVT::f32: in selectFRem()
4688 case MVT::f64: in selectFRem()
4715 MVT VT; in selectSDiv()
4723 if ((VT != MVT::i32 && VT != MVT::i64) || !C || in selectSDiv()
4752 if (VT == MVT::i64) { in selectSDiv()
4767 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectSDiv()
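selectSDiv only fast-paths power-of-two divisors (the guard starting at file line 4723). Signed division truncates toward zero, so a negative dividend is biased by 2^k - 1 before the arithmetic shift, with a CSEL picking the biased or the original value. Standalone C++ equivalent for a positive divisor 2^k (not LLVM code; relies on >> acting as an arithmetic shift for signed values, as on all mainstream compilers):

    #include <cstdint>
    int32_t sdiv_by_pow2(int32_t x, unsigned k) {
      uint32_t bias = (1u << k) - 1;
      int32_t biased = (int32_t)((uint32_t)x + bias); // ADD, wrapping like hardware
      int32_t src = (x < 0) ? biased : x;             // CSEL on the sign of x
      return src >> k;                                // ASR #k
    }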
4794 MVT PtrVT = TLI.getPointerTy(DL); in getRegForGEPIndex()
4818 MVT VT = TLI.getPointerTy(DL); in selectGetElementPtr()