Lines matching refs:hasSSE41 in llvm/lib/Target/X86/X86ISelLowering.cpp
1078 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) { in X86TargetLowering()
2338 return (Align < 16 || !Subtarget.hasSSE41()); in allowsMisalignedMemoryAccesses()
7975 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) && in LowerBuildVectorAsInsert()
8014 if (NumNonZero > 8 && !Subtarget.hasSSE41()) in LowerBuildVectorv16i8()
8018 if (Subtarget.hasSSE41()) in LowerBuildVectorv16i8()
8080 if (NumNonZero > 4 && !Subtarget.hasSSE41()) in LowerBuildVectorv8i16()
8176 if (!Subtarget.hasSSE41()) in LowerBuildVectorv4x32()
9927 } else if (Subtarget.hasSSE41()) { in createVariablePermute()
10457 if (Subtarget.hasSSE41()) { in LowerBUILD_VECTOR()
11243 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) && in matchShuffleWithUNPCK()
11619 if (Subtarget.hasSSE41() || BitSize == 8) { in matchShuffleWithPACK()
11687 (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41())) in lowerShuffleWithPACK()
11884 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!"); in lowerShuffleAsBlend()
11920 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!"); in lowerShuffleAsBlend()
12927 if (Subtarget.hasSSE41()) { in lowerShuffleAsSpecificZeroOrAnyExtend()
13915 if (Subtarget.hasSSE41()) in lowerV2F64Shuffle()
13990 bool IsBlendSupported = Subtarget.hasSSE41(); in lowerV2I64Shuffle()
14186 if (Subtarget.hasSSE41()) { in lowerV4F32Shuffle()
14280 bool IsBlendSupported = Subtarget.hasSSE41(); in lowerV4I32Shuffle()
14989 bool IsBlendSupported = Subtarget.hasSSE41(); in lowerV8I16Shuffle()
15031 if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() && in lowerV8I16Shuffle()
15318 if (Subtarget.hasSSE41()) in lowerV16I8Shuffle()
18353 if (!Subtarget.hasSSE41()) in LowerVSELECT()
18589 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op))) in LowerEXTRACT_VECTOR_ELT()
18599 if (Subtarget.hasSSE41()) in LowerEXTRACT_VECTOR_ELT()
18713 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() && in LowerINSERT_VECTOR_ELT()
18778 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) { in LowerINSERT_VECTOR_ELT()
18785 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB"); in LowerINSERT_VECTOR_ELT()
18795 if (Subtarget.hasSSE41()) { in LowerINSERT_VECTOR_ELT()
20185 if (Subtarget.hasSSE41()) { in lowerUINT_TO_FP_vXi32()
20736 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) { in truncateVectorWithPACK()
20938 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8; in LowerTRUNCATE()
21834 bool UsePTEST = Subtarget.hasSSE41(); in LowerVectorAllZero()
22963 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) { in LowerVSETCC()
24526 if (Subtarget.hasSSE41()) in getTargetVShiftNode()
24538 } else if (Subtarget.hasSSE41() && in getTargetVShiftNode()
26953 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) { in LowerABS()
27061 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() && in LowerMUL()
27182 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ; in LowerMULH()
27203 if (IsSigned && !Subtarget.hasSSE41()) { in LowerMULH()
27256 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) { in LowerMULH()
27298 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) { in LowerMULH()
27696 if (Subtarget.hasSSE41()) in convertShiftLeftToScale()
27808 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL || in LowerShift()
27849 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() && in LowerShift()
27913 if (Subtarget.hasSSE41()) { in LowerShift()
28009 } else if (Subtarget.hasSSE41()) { in LowerShift()
28122 bool UseSSE41 = Subtarget.hasSSE41() && in LowerShift()
28280 if (Subtarget.hasSSE41()) { in LowerRotate()
30087 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 && in ReplaceNodeResults()
30343 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) { in ReplaceNodeResults()
34506 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) || in matchUnaryShuffle()
34781 Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) { in matchBinaryShuffle()
34788 (AllowFloatDomain || !Subtarget.hasSSE41())) { in matchBinaryShuffle()
34905 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) || in matchBinaryPermuteShuffle()
34943 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() && in matchBinaryPermuteShuffle()
35015 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() && in matchBinaryPermuteShuffle()
35368 Subtarget.hasSSE41() && in combineX86ShuffleChain()
39441 if (!Subtarget.hasSSE41()) in combineHorizontalMinMaxResult()
39556 if (BinOp == ISD::AND && !Subtarget.hasSSE41() && in combineHorizontalPredicateResult()
39885 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) { in combineExtractWithShuffle()
39893 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) { in combineExtractWithShuffle()
40034 if (Subtarget.hasSSE41()) { in combineReductionToHorizontal()
40499 if (VT.is128BitVector() && !Subtarget.hasSSE41()) in combineVSelectToBLENDV()
41584 if (IsAllOf && Subtarget.hasSSE41()) { in combineSetCCMOVMSK()
41991 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow())) in reduceVMULWidth()
42166 if (!Subtarget.hasSSE41() && in combineMulToPMADDWD()
42207 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 && in combineMulToPMULDQ()
42390 if (!Subtarget.hasSSE41()) in combineShiftToPMULH()
43805 if (!Subtarget.hasSSE41()) in combineLogicBlendIntoPBLENDV()
44273 } else if (SVT == MVT::i8 || Subtarget.hasSSE41()) in combineTruncateWithSat()
45496 if (Subtarget.hasSSE41() || OutSVT == MVT::i8) in combineVectorTruncation()
45547 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8; in combineVectorSignBitsTruncation()
47174 bool HasPT = Subtarget.hasSSE41(); in combineVectorSizedSetCCEquality()
49493 (Opcode == ISD::ZERO_EXTEND_VECTOR_INREG && Subtarget.hasSSE41())) { in combineEXTEND_VECTOR_INREG()