Lines Matching refs:hasAVX2

5405 if (Subtarget.hasAVX2()) in shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd()
6045 } else if (Subtarget.hasAVX2()) { in SplitOpsAndApply()
8566 if (!Subtarget.hasAVX2() && ScalarSize < 32) in EltsFromConsecutiveLoads()
8763 (SplatBitSize < 32 && Subtarget.hasAVX2())) { in lowerBuildVectorAsBroadcast()
8840 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) { in lowerBuildVectorAsBroadcast()
8849 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) { in lowerBuildVectorAsBroadcast()
9622 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) { in LowerToHorizontalOp()
9985 if (Subtarget.hasAVX2()) in createVariablePermute()
10344 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) { in LowerBUILD_VECTOR()
11133 (Subtarget.hasAVX2() && VT.is256BitVector()) || in lowerShuffleWithPSHUFB()
11873 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!"); in lowerShuffleAsBlend()
11888 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!"); in lowerShuffleAsBlend()
11917 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!"); in lowerShuffleAsBlend()
12119 (VT.is256BitVector() && !Subtarget.hasAVX2()) || in lowerShuffleAsByteRotateAndPermute()
13312 assert(Subtarget.hasAVX2() && in lowerShuffleAsTruncBroadcast()
13442 (Subtarget.hasAVX2() && VT.isInteger()))) in lowerShuffleAsBroadcast()
13448 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2()) in lowerShuffleAsBroadcast()
13451 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2(); in lowerShuffleAsBroadcast()
13887 if (Subtarget.hasAVX2()) in lowerV2F64Shuffle()
13967 if (Subtarget.hasAVX2()) in lowerV2I64Shuffle()
14172 if (Subtarget.hasAVX2()) in lowerV4F32Shuffle()
14263 if (Subtarget.hasAVX2()) in lowerV4I32Shuffle()
15684 bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef(); in lowerShuffleAsLanePermuteAndPermute()
15801 if (!Subtarget.hasAVX2()) { in lowerShuffleAsLanePermuteAndShuffle()
15848 if (Subtarget.hasAVX2() && V2.isUndef()) in lowerV2X128Shuffle()
16269 if (Subtarget.hasAVX2()) { in lowerShuffleWithUndefHalf()
16300 if (Subtarget.hasAVX2() && EltWidth == 64) in lowerShuffleWithUndefHalf()
16342 if (Subtarget.hasAVX2()) { in lowerShuffleAsRepeatedMaskAndLanePermute()
16395 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1; in lowerShuffleAsRepeatedMaskAndLanePermute()
16640 if (Subtarget.hasAVX2()) in lowerV4F64Shuffle()
16701 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) || in lowerV4F64Shuffle()
16715 if (Subtarget.hasAVX2()) in lowerV4F64Shuffle()
16735 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!"); in lowerV4I64Shuffle()
16882 if (Subtarget.hasAVX2()) { in lowerV8F32Shuffle()
16912 if (Subtarget.hasAVX2()) in lowerV8F32Shuffle()
16932 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!"); in lowerV8I32Shuffle()
17047 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!"); in lowerV16I16Shuffle()
17162 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!"); in lowerV32I8Shuffle()
17294 if (VT.isInteger() && !Subtarget.hasAVX2()) { in lower256BitShuffle()
18400 if (Subtarget.hasAVX2()) in LowerVSELECT()
18733 (Subtarget.hasAVX2() && EltVT == MVT::i32)) { in LowerINSERT_VECTOR_ELT()
27690 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) { in convertShiftLeftToScale()
28255 if (VT.is256BitVector() && !Subtarget.hasAVX2()) in LowerRotate()
28261 Subtarget.hasAVX2())) && in LowerRotate()
28343 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) { in LowerRotate()
29584 assert(Subtarget.hasAVX2() && in LowerMGATHER()
31186 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64)) in isVectorShiftByScalarCheap()
31404 if (!Subtarget.hasAVX2()) in isVectorClearMaskLegal()
34638 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) { in matchUnaryPermuteShuffle()
34692 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchUnaryPermuteShuffle()
34727 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchUnaryPermuteShuffle()
34810 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchBinaryShuffle()
34815 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2()) in matchBinaryShuffle()
34893 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchBinaryPermuteShuffle()
34907 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) { in matchBinaryPermuteShuffle()
35084 (RootVT.is256BitVector() && !Subtarget.hasAVX2()); in combineX86ShuffleChain()
35222 !(Subtarget.hasAVX2() && isUndefOrInRange(BaseMask, 0, 2)) && in combineX86ShuffleChain()
35304 (!MaskVT.is256BitVector() || Subtarget.hasAVX2()); in combineX86ShuffleChain()
35314 if ((Subtarget.hasAVX2() || in combineX86ShuffleChain()
35327 if (Subtarget.hasAVX2()) { in combineX86ShuffleChain()
35513 if (Subtarget.hasAVX2() && in combineX86ShuffleChain()
35671 (RootVT.is256BitVector() && Subtarget.hasAVX2()) || in combineX86ShuffleChain()
36741 assert(Subtarget.hasAVX2() && "Expected AVX2"); in combineTargetShuffle()
37411 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N)) in combineShuffleOfConcatUndef()
40502 if (VT == MVT::v32i8 && !Subtarget.hasAVX2()) in combineVSelectToBLENDV()
41046 (Subtarget.hasAVX2() && EltBitWidth == 64) || in combineSelect()
44098 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break; in foldVectorXorShiftIntoCmp()
45306 if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() && in isHorizontalBinOp()
45476 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2()) in combineVectorTruncation()
46791 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits && in combineToExtendBoolVectorInReg()
48741 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0)))) in combineConcatVectorOps()
48749 (Subtarget.hasAVX2() || in combineConcatVectorOps()
49170 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() && in combineExtractSubvector()
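A recurring pattern across these hits is gating 256-bit integer lowering on AVX2: AVX alone provides 256-bit floating-point operations, while AVX2 extends the integer instructions to 256 bits, so checks like "VT.is256BitVector() && !Subtarget.hasAVX2()" typically force a fallback (for example, splitting into two 128-bit halves). The following standalone sketch models that decision outside of LLVM; it is not code from X86ISelLowering.cpp, and all names (VecType, mustSplitInto128BitHalves) are hypothetical.

    // Standalone model of the gating pattern seen above (hypothetical names,
    // not LLVM APIs): 256-bit *integer* vectors need AVX2, otherwise the
    // operation would be split into two 128-bit halves.
    #include <cstdio>

    struct VecType {
      unsigned Bits;    // total vector width in bits
      bool IsInteger;   // integer vs. floating-point element type
    };

    // Mirrors checks of the form "VT.is256BitVector() && !Subtarget.hasAVX2()".
    bool mustSplitInto128BitHalves(const VecType &VT, bool HasAVX2) {
      return VT.Bits == 256 && VT.IsInteger && !HasAVX2;
    }

    int main() {
      VecType V8I32{256, true};   // e.g. v8i32
      VecType V8F32{256, false};  // e.g. v8f32
      std::printf("v8i32, AVX1 only: split = %d\n", mustSplitInto128BitHalves(V8I32, false));
      std::printf("v8i32, AVX2     : split = %d\n", mustSplitInto128BitHalves(V8I32, true));
      std::printf("v8f32, AVX1 only: split = %d\n", mustSplitInto128BitHalves(V8F32, false));
      return 0;
    }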