Lines Matching refs:hasVLX
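Each hit below gives the line number in the source file, the matching fragment (long lines appear truncated), and the enclosing function. A sketch of the recurring hasVLX pattern follows the listing.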

692 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
848 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
871 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
876 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
878 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
880 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
882 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass in X86TargetLowering()
1051 !(Subtarget.hasBWI() && Subtarget.hasVLX())) in X86TargetLowering()
1168 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass in X86TargetLowering()
1170 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass in X86TargetLowering()
1172 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass in X86TargetLowering()
1174 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass in X86TargetLowering()
1176 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass in X86TargetLowering()
1178 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass in X86TargetLowering()
1358 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1543 if (!Subtarget.hasVLX()) { in X86TargetLowering()
1735 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1737 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1740 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1742 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1745 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1747 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1749 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1751 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1790 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1792 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1794 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1796 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1798 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1800 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1802 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1804 Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1858 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1859 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom); in X86TargetLowering()
1872 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) { in X86TargetLowering()
2197 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) { in getSetCCResultType()
3652 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass; in LowerFormalArguments()
3654 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass; in LowerFormalArguments()
8732 if (!VT.is512BitVector() && !Subtarget.hasVLX()) { in lowerBuildVectorAsBroadcast()
8886 (Subtarget.hasVLX() && ScalarSize == 64)) { in lowerBuildVectorAsBroadcast()
9903 if (Subtarget.hasVLX() && Subtarget.hasBWI()) in createVariablePermute()
9938 if (Subtarget.hasVLX() && Subtarget.hasVBMI()) in createVariablePermute()
9972 if (Subtarget.hasVLX() && Subtarget.hasBWI()) in createVariablePermute()
10010 if (!Subtarget.hasVLX()) { in createVariablePermute()
11346 if (!VT.is512BitVector() && !Subtarget.hasVLX()) in matchShuffleAsVTRUNC()
11412 if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) { in getAVX512TruncNode()
11680 if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX()) in lowerShuffleWithPACK()
11927 if (Subtarget.hasBWI() && Subtarget.hasVLX()) { in lowerShuffleAsBlend()
11935 if (Subtarget.hasVLX()) in lowerShuffleAsBlend()
12545 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector())) in lowerShuffleAsVALIGN()
14003 if (Subtarget.hasVLX()) in lowerV2I64Shuffle()
14297 if (Subtarget.hasVLX()) in lowerV4I32Shuffle()
15032 !Subtarget.hasVLX()) { in lowerV8I16Shuffle()
15079 if (!VT.is512BitVector() && !Subtarget.hasVLX()) { in lowerShuffleWithPERMV()
15900 if (Subtarget.hasVLX()) { in lowerV2X128Shuffle()
16708 if (Subtarget.hasVLX()) in lowerV4F64Shuffle()
16776 if (Subtarget.hasVLX()) { in lowerV4I64Shuffle()
16898 if (Subtarget.hasVLX()) in lowerV8F32Shuffle()
16981 if (Subtarget.hasVLX()) { in lowerV8I32Shuffle()
17254 if (Subtarget.hasVLX()) in lowerV32I8Shuffle()
18076 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64; in lower1BitShuffle()
19456 if (!Subtarget.hasVLX() && !VT.is512BitVector()) { in LowerFunnelShift()
19470 if (!Subtarget.hasVLX() && !VT.is512BitVector()) in LowerFunnelShift()
19475 if (!Subtarget.hasVLX() && !VT.is512BitVector()) in LowerFunnelShift()
19544 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8; in LowerI64IntToFP_AVX512DQ()
19687 assert(!Subtarget.hasVLX() && "Unexpected features"); in lowerINT_TO_FP_vXi64()
20038 if (!Subtarget.hasVLX()) { in lowerUINT_TO_FP_v2i32()
20090 assert(!Subtarget.hasVLX() && "Unexpected features"); in lowerUINT_TO_FP_vXi32()
20653 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) { in LowerZERO_EXTEND_Mask()
20865 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts); in LowerTruncateVecI1()
20898 assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) && in LowerTRUNCATE()
21057 if (!IsSigned && !Subtarget.hasVLX()) { in LowerFP_TO_INT()
21099 assert(Subtarget.useAVX512Regs() && !Subtarget.hasVLX() && in LowerFP_TO_INT()
21132 !Subtarget.hasVLX() && "Unexpected features!"); in LowerFP_TO_INT()
21160 if (!Subtarget.hasVLX()) { in LowerFP_TO_INT()
21179 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL"); in LowerFP_TO_INT()
22606 (!IsStrict || Subtarget.hasVLX() || in LowerVSETCC()
23619 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) { in LowerSIGN_EXTEND_Mask()
27931 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) { in LowerShift()
29431 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) { in LowerMSCATTER()
29452 if (!Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMSCATTER()
29507 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMLOAD()
29555 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMSTORE()
29604 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMGATHER()
30045 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) { in ReplaceNodeResults()
30057 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 && in ReplaceNodeResults()
30220 if (!IsSigned && !Subtarget.hasVLX()) { in ReplaceNodeResults()
30269 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8; in ReplaceNodeResults()
30328 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) { in ReplaceNodeResults()
30660 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) { in ReplaceNodeResults()
30673 if (!Subtarget.hasVLX()) { in ReplaceNodeResults()
34874 ((MaskVT.is128BitVector() && Subtarget.hasVLX()) || in matchBinaryPermuteShuffle()
34875 (MaskVT.is256BitVector() && Subtarget.hasVLX()) || in matchBinaryPermuteShuffle()
35090 if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) { in combineX86ShuffleChain()
35479 (RootVT.is128BitVector() && Subtarget.hasVLX())) && in combineX86ShuffleChain()
42802 if (Subtarget.hasVLX()) in combineVectorPack()
43532 !(Subtarget.hasVLX() || SetccVT.is512BitVector())) in combineScalarAndWithMaskSetcc()
43691 Subtarget.hasVLX(); in canonicalizeBitSelect()
43809 if (Subtarget.hasVLX()) in combineLogicBlendIntoPBLENDV()
44252 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) && in combineTruncateWithSat()
44298 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) { in combineTruncateWithSat()
47180 bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512; in combineVectorSizedSetCCEquality()
49292 if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() && in combineExtractSubvector()
49327 if (InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() && in combineExtractSubvector()
50722 if (VConstraint && Subtarget.hasVLX()) in getRegForInlineAsmConstraint()
50727 if (VConstraint && Subtarget.hasVLX()) in getRegForInlineAsmConstraint()
50732 if (VConstraint && Subtarget.hasVLX()) in getRegForInlineAsmConstraint()
50745 if (VConstraint && Subtarget.hasVLX()) in getRegForInlineAsmConstraint()
50755 if (VConstraint && Subtarget.hasVLX()) in getRegForInlineAsmConstraint()
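Taken together, the hits show one idiom repeated throughout this file (LLVM's X86ISelLowering.cpp, judging by the function names): AVX-512VL (Subtarget.hasVLX()) gates whether 128- and 256-bit vectors are placed in the EVEX-encodable register classes (VR128X/VR256X, i.e. XMM/YMM0-31 rather than XMM/YMM0-15), and whether sub-512-bit operations such as masked load/store are directly Legal or need a Custom lowering that widens them to 512 bits. The stand-alone sketch below models that decision; it is not LLVM code, and Subtarget, regClassFor, and maskedOpAction are hypothetical stand-ins, not LLVM's API.

    // Minimal model (C++14) of the hasVLX gating pattern seen above.
    #include <iostream>
    #include <string>

    struct Subtarget {            // hypothetical stand-in for X86Subtarget
      bool AVX512 = false;
      bool VLX = false;           // AVX-512VL: EVEX ops at 128/256 bits
      bool hasAVX512() const { return AVX512; }
      bool hasVLX() const { return AVX512 && VLX; }
    };

    enum class Action { Legal, Custom };

    // Mirrors the recurring ternary: the extended VRnnnX class when VLX
    // is present, the legacy VRnnn class otherwise.
    std::string regClassFor(unsigned Bits, const Subtarget &ST) {
      std::string Base = "VR" + std::to_string(Bits);
      return ST.hasVLX() ? Base + "X" : Base;
    }

    // Mirrors "Subtarget.hasVLX() ? Legal : Custom" for masked ops:
    // 512-bit ops are always native under AVX-512; narrower ones are
    // native only with VLX and otherwise widened to 512 bits.
    Action maskedOpAction(unsigned Bits, const Subtarget &ST) {
      if (Bits == 512 || ST.hasVLX())
        return Action::Legal;
      return Action::Custom;
    }

    int main() {
      Subtarget AVX512F{true, false}, AVX512VL{true, true};
      std::cout << regClassFor(128, AVX512F)  << '\n';  // VR128
      std::cout << regClassFor(128, AVX512VL) << '\n';  // VR128X
      std::cout << (maskedOpAction(256, AVX512F)  == Action::Custom) << '\n'; // 1
      std::cout << (maskedOpAction(256, AVX512VL) == Action::Legal)  << '\n'; // 1
    }

The same split explains the paired hits such as lines 1858/1859 (MLOAD/MSTORE marked Legal only with VLX) and the asserts in LowerMLOAD/LowerMSTORE, which fire precisely in the AVX-512-without-VLX configuration that the Custom widening path handles.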