Lines Matching refs:hasSSE2

107 X86ScalarSSEf64 = Subtarget.hasSSE2(); in X86TargetLowering()
870 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) { in X86TargetLowering()
2279 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128)) in getOptimalMemOpType()
2287 Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) { in getOptimalMemOpType()
2701 } else if (!Subtarget.hasSSE2() && in LowerReturn()
2733 if (!Subtarget.hasSSE2()) in LowerReturn()
3034 } else if (!Subtarget.hasSSE2() && in LowerCallResult()
3528 else if (Subtarget.hasSSE2()) in forwardMustTailParameters()
5381 return Subtarget.hasSSE2(); in hasAndNot()
5795 if (!Subtarget.hasSSE2() && VT.is128BitVector()) { in getZeroVector()
6037 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2"); in SplitOpsAndApply()
7974 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) || in LowerBuildVectorAsInsert()
8544 if (!Subtarget.hasSSE2() && VT == MVT::v4f32) in EltsFromConsecutiveLoads()
9820 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) { in materializeVectorConstant()
14159 if (!Subtarget.hasSSE2()) { in lowerV4F32Shuffle()
14203 if (!Subtarget.hasSSE2()) { in lowerV4F32Shuffle()
18781 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW"); in LowerINSERT_VECTOR_ELT()
19570 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32) in useVectorCast()
19648 if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) || in lowerFPToIntToFP()
19832 if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit()) in LowerSINT_TO_FP()
20706 if (!Subtarget.hasSSE2()) in truncateVectorWithPACK()
21862 if (!Subtarget.hasSSE2() || !Op->hasOneUse()) in MatchVectorAllZeroTest()
22185 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) || in getSqrtEstimate()
22530 if (!Subtarget.hasSSE2()) in LowerVSETCCWithSUBUS()
22901 assert(Subtarget.hasSSE2() && "Don't know how to lower!"); in LowerVSETCC()
22966 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!"); in LowerVSETCC()
23683 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) && in LowerEXTEND_VECTOR_INREG()
23959 if (Subtarget.hasSSE2()) { in LowerStore()
27061 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() && in LowerMUL()
27153 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) || in LowerMULH()
27395 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) || in SupportedVectorShiftWithImm()
27721 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!"); in LowerShift()
28741 assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); in LowerBITCAST()
29222 MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32; in LowerATOMIC_STORE()
29937 assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); in ReplaceNodeResults()
30209 assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); in ReplaceNodeResults()
30401 assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); in ReplaceNodeResults()
30550 MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32; in ReplaceNodeResults()
30555 if (Subtarget.hasSSE2()) { in ReplaceNodeResults()
30625 assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); in ReplaceNodeResults()
30644 assert(Subtarget.hasSSE2() && "Requires SSE2"); in ReplaceNodeResults()
30702 if (Subtarget.hasSSE2()) { in ReplaceNodeResults()
34493 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT; in matchUnaryShuffle()
34499 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT; in matchUnaryShuffle()
34543 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) && in matchUnaryShuffle()
34547 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT; in matchUnaryShuffle()
34691 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) || in matchUnaryPermuteShuffle()
34726 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) || in matchUnaryPermuteShuffle()
34770 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS; in matchBinaryShuffle()
34771 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32; in matchBinaryShuffle()
34776 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS; in matchBinaryShuffle()
34777 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32; in matchBinaryShuffle()
34781 Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) { in matchBinaryShuffle()
34796 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) || in matchBinaryShuffle()
34808 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) || in matchBinaryShuffle()
34953 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) || in matchBinaryPermuteShuffle()
35303 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() && in combineX86ShuffleChain()
38766 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) { in combineBitcastvxi1()
38797 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk)) in combineBitcastvxi1()
39360 (Subtarget.hasSSE2() && VT == MVT::f64))) in combineBitcast()
39516 if (!Subtarget.hasSSE2()) in combineHorizontalPredicateResult()
39659 if (!Subtarget.hasSSE2()) in combineBasicSADPattern()
39885 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) { in combineExtractWithShuffle()
39892 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) || in combineExtractWithShuffle()
40010 if (!Subtarget.hasSSE2()) in combineReductionToHorizontal()
40691 (Subtarget.hasSSE2() || in combineSelect()
41709 (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) || in combineCMov()
41983 if (!Subtarget.hasSSE2()) in reduceVMULWidth()
42139 if (!Subtarget.hasSSE2()) in combineMulToPMADDWD()
42191 if (!Subtarget.hasSSE2()) in combineMulToPMULDQ()
43024 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { in combineCompareEqual()
43288 (Subtarget.hasSSE2() && N00Type == MVT::f64))) in convertIntLogicToFPLogic()
43555 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) { in combineAnd()
43777 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) || in combineLogicBlendIntoPBLENDV()
43936 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) { in combineOr()
44094 case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break; in foldVectorXorShiftIntoCmp()
44221 if (!Subtarget.hasSSE2() || !VT.isVector()) in combineTruncateWithSat()
44341 if (!Subtarget.hasSSE2()) in detectAVGPattern()
44945 if (VT.is128BitVector() && Subtarget.hasSSE2()) { in combineStore()
45061 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2(); in combineStore()
45476 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2()) in combineVectorTruncation()
45511 if (!Subtarget.hasSSE2()) in combineVectorSignBitsTruncation()
45586 if (!Subtarget.hasSSE2()) in combinePMULH()
46071 if (!VT.isVector() || !Subtarget.hasSSE2()) in lowerX86FPLogicOp()
46114 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && in combineXor()
46190 (VT == MVT::f64 && Subtarget.hasSSE2()) || in combineFAndFNotToFAndn()
46191 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2()))) in combineFAndFNotToFAndn()
46294 (Subtarget.hasSSE2() && VT == MVT::f64) || in combineFMinNumFMaxNum()
46746 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512()) in combineToExtendBoolVectorInReg()
47171 if ((OpSize == 128 && Subtarget.hasSSE2()) || in combineVectorSizedSetCCEquality()
47348 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 && in combineSetCC()
47378 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST && in combineMOVMSK()
48228 if (!Subtarget.hasSSE2()) in matchPMADDWD()
48325 if (!Subtarget.hasSSE2()) in matchPMADDWD_2()
48524 if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) && in combineSubToSubus()
50322 if (!Subtarget.hasSSE2()) in getSingleConstraintMatchWeight()
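
A recurring pattern in the matches above (the file appears to be LLVM's X86ISelLowering.cpp, judging by the function names) is a feature gate on Subtarget.hasSSE2(): lowering and combine hooks either bail out early when SSE2 is unavailable, or assert it on paths that are only reachable once it has been verified. Below is a minimal standalone sketch of that gating style; the Subtarget struct and lowerWithSSE2Guard function are hypothetical stand-ins for illustration, not the real llvm::X86Subtarget API.

  #include <cassert>
  #include <iostream>

  // Hypothetical stand-in for llvm::X86Subtarget; only the feature
  // query used throughout the listing is modeled here.
  struct Subtarget {
    bool SSE2 = true;
    bool hasSSE2() const { return SSE2; }
  };

  // Mirrors the two guard styles that dominate the matches: an early
  // bail-out in a lowering/combine hook, and an assert on a path that
  // is only reachable when SSE2 was already checked by the caller.
  bool lowerWithSSE2Guard(const Subtarget &ST) {
    if (!ST.hasSSE2())
      return false; // corresponds to the `return SDValue();` bail-outs
    assert(ST.hasSSE2() && "Requires at least SSE2!");
    return true;
  }

  int main() {
    Subtarget ST;
    std::cout << (lowerWithSSE2Guard(ST) ? "lowered" : "skipped") << '\n';
  }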