; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O2 -S -mattr=avx < %s | FileCheck %s
; RUN: opt -passes='default<O2>' -S -mattr=avx < %s | FileCheck %s

target triple = "x86_64--"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; A full 4-lane chain of extractelement + 'or' over (x & y) should collapse
; into a single @llvm.vector.reduce.or call (see CHECK lines).
define i32 @ext_ext_or_reduction_v4i32(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @ext_ext_or_reduction_v4i32(
; CHECK-NEXT:    [[Z:%.*]] = and <4 x i32> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[Z]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %z = and <4 x i32> %x, %y
  %z0 = extractelement <4 x i32> %z, i32 0
  %z1 = extractelement <4 x i32> %z, i32 1
  %z01 = or i32 %z0, %z1
  %z2 = extractelement <4 x i32> %z, i32 2
  %z012 = or i32 %z01, %z2
  %z3 = extractelement <4 x i32> %z, i32 3
  ; Final 'or' has its operands commuted relative to the chain above.
  %z0123 = or i32 %z3, %z012
  ret i32 %z0123
}

; Partial (3-of-4 lane) add reduction: per the CHECK lines this is expected to
; become shuffle+add steps feeding a single extract, not a reduce intrinsic.
define i32 @ext_ext_partial_add_reduction_v4i32(<4 x i32> %x) {
; CHECK-LABEL: @ext_ext_partial_add_reduction_v4i32(
; CHECK-NEXT:    [[SHIFT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[SHIFT]], [[X]]
; CHECK-NEXT:    [[SHIFT1:%.*]] = shufflevector <4 x i32> [[X]], <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i32> [[TMP1]], [[SHIFT1]]
; CHECK-NEXT:    [[X210:%.*]] = extractelement <4 x i32> [[TMP2]], i64 0
; CHECK-NEXT:    ret i32 [[X210]]
;
  %x0 = extractelement <4 x i32> %x, i32 0
  %x1 = extractelement <4 x i32> %x, i32 1
  %x10 = add i32 %x1, %x0
  %x2 = extractelement <4 x i32> %x, i32 2
  %x210 = add i32 %x2, %x10
  ret i32 %x210
}

; Same partial add reduction over %y, plus an extra add of lane 2 of %x;
; the CHECK lines expect the whole chain vectorized as shuffle+add steps.
define i32 @ext_ext_partial_add_reduction_and_extra_add_v4i32(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @ext_ext_partial_add_reduction_and_extra_add_v4i32(
; CHECK-NEXT:    [[SHIFT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[SHIFT]], [[Y:%.*]]
; CHECK-NEXT:    [[SHIFT1:%.*]] = shufflevector <4 x i32> [[Y]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i32> [[TMP1]], [[SHIFT1]]
; CHECK-NEXT:    [[SHIFT2:%.*]] = shufflevector <4 x i32> [[Y]], <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[SHIFT2]]
; CHECK-NEXT:    [[X2Y210:%.*]] = extractelement <4 x i32> [[TMP3]], i32 0
; CHECK-NEXT:    ret i32 [[X2Y210]]
;
  %y0 = extractelement <4 x i32> %y, i32 0
  %y1 = extractelement <4 x i32> %y, i32 1
  %y10 = add i32 %y1, %y0
  %y2 = extractelement <4 x i32> %y, i32 2
  %y210 = add i32 %y2, %y10
  %x2 = extractelement <4 x i32> %x, i32 2
  %x2y210 = add i32 %x2, %y210
  ret i32 %x2y210
}

; PR43953 - https://bugs.llvm.org/show_bug.cgi?id=43953
; We want to end up with a single reduction on the next 4 tests.

; Scalar loop: sum of |Vec0[i] - Vec1[i]| for i in 0..3, then compared against
; %Tolerance. The CHECK lines expect the loop fully unrolled and vectorized:
; two <4 x i32> loads, vector abs-via-select, and one @llvm.vector.reduce.add.
define i32 @TestVectorsEqual(i32* noalias %Vec0, i32* noalias %Vec1, i32 %Tolerance) {
; CHECK-LABEL: @TestVectorsEqual(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[VEC0:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[VEC1:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP4]]
; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
; CHECK-NEXT:    [[CMP5_NOT:%.*]] = icmp sle i32 [[TMP8]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND6:%.*]] = zext i1 [[CMP5_NOT]] to i32
; CHECK-NEXT:    ret i32 [[COND6]]
;
entry:
  br label %for.cond

for.cond:
  ; %sum.0 accumulates the absolute differences; %Component.0 is the index.
  %sum.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds i32, i32* %Vec0, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %Vec1, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %sub = sub nsw i32 %0, %1
  ; Branchy integer abs: negate the difference when it is negative.
  %cmp3 = icmp sge i32 %sub, 0
  br i1 %cmp3, label %cond.true, label %cond.false

cond.true:
  br label %cond.end

cond.false:
  %sub4 = sub nsw i32 0, %sub
  br label %cond.end

cond.end:
  %cond = phi i32 [ %sub, %cond.true ], [ %sub4, %cond.false ]
  %add = add nsw i32 %sum.0, %cond
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp5 = icmp sle i32 %sum.0, %Tolerance
  %2 = zext i1 %cmp5 to i64
  %cond6 = select i1 %cmp5, i32 1, i32 0
  ret i32 %cond6
}

; Same loop shape without the abs step; expected to reduce to a plain vector
; sub feeding a single @llvm.vector.reduce.add (see CHECK lines).
define i32 @TestVectorsEqual_alt(i32* noalias %Vec0, i32* noalias %Vec1, i32 %Tolerance) {
; CHECK-LABEL: @TestVectorsEqual_alt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[VEC0:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[VEC1:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = sub <4 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[CMP3_NOT:%.*]] = icmp ule i32 [[TMP5]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = zext i1 [[CMP3_NOT]] to i32
; CHECK-NEXT:    ret i32 [[COND]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds i32, i32* %Vec0, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %Vec1, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %sub = sub i32 %0, %1
  %add = add i32 %sum.0, %sub
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  ; Note: unsigned compare here (ule), unlike the signed version above.
  %cmp3 = icmp ule i32 %sum.0, %Tolerance
  %2 = zext i1 %cmp3 to i64
  %cond = select i1 %cmp3, i32 1, i32 0
  ret i32 %cond
}

; Fast-math FP variant of TestVectorsEqual: abs is written as a compare +
; fneg diamond. Expected result per CHECK lines: vector fsub, @llvm.fabs,
; and a single @llvm.vector.reduce.fadd.
define i32 @TestVectorsEqualFP(float* noalias %Vec0, float* noalias %Vec1, float %Tolerance) {
; CHECK-LABEL: @TestVectorsEqualFP(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[VEC0:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[VEC1:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <4 x float> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
; CHECK-NEXT:    [[CMP4:%.*]] = fcmp fast ole float [[TMP6]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND5:%.*]] = zext i1 [[CMP4]] to i32
; CHECK-NEXT:    ret i32 [[COND5]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi float [ 0.000000e+00, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds float, float* %Vec0, i64 %idxprom
  %0 = load float, float* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds float, float* %Vec1, i64 %idxprom1
  %1 = load float, float* %arrayidx2, align 4
  %sub = fsub fast float %0, %1
  ; Branchy FP abs: negate the difference when it is negative.
  %cmp3 = fcmp fast oge float %sub, 0.000000e+00
  br i1 %cmp3, label %cond.true, label %cond.false

cond.true:
  br label %cond.end

cond.false:
  %fneg = fneg fast float %sub
  br label %cond.end

cond.end:
  %cond = phi fast float [ %sub, %cond.true ], [ %fneg, %cond.false ]
  %add = fadd fast float %sum.0, %cond
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp4 = fcmp fast ole float %sum.0, %Tolerance
  %2 = zext i1 %cmp4 to i64
  %cond5 = select i1 %cmp4, i32 1, i32 0
  ret i32 %cond5
}

; FP variant without the abs diamond; expected to become a vector fsub feeding
; a single @llvm.vector.reduce.fadd (see CHECK lines).
define i32 @TestVectorsEqualFP_alt(float* noalias %Vec0, float* noalias %Vec1, float %Tolerance) {
; CHECK-LABEL: @TestVectorsEqualFP_alt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[VEC0:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[VEC1:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <4 x float> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP4]])
; CHECK-NEXT:    [[CMP3:%.*]] = fcmp fast ole float [[TMP5]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = zext i1 [[CMP3]] to i32
; CHECK-NEXT:    ret i32 [[COND]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi float [ 0.000000e+00, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds float, float* %Vec0, i64 %idxprom
  %0 = load float, float* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds float, float* %Vec1, i64 %idxprom1
  %1 = load float, float* %arrayidx2, align 4
  %sub = fsub fast float %0, %1
  %add = fadd fast float %sum.0, %sub
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp3 = fcmp fast ole float %sum.0, %Tolerance
  %2 = zext i1 %cmp3 to i64
  %cond = select i1 %cmp3, i32 1, i32 0
  ret i32 %cond
}

; PR43745 - https://bugs.llvm.org/show_bug.cgi?id=43745

; Two parallel scalar chains, (c-b)/(2a) and (-b-c)/(2a), each compared twice.
; Per the CHECK lines, they should be packed into <2 x double> operations with
; the i1 results combined via shuffle + and/or.
define i1 @cmp_lt_gt(double %a, double %b, double %c) {
; CHECK-LABEL: @cmp_lt_gt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[FNEG:%.*]] = fneg double [[B:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[A:%.*]], 2.000000e+00
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[C:%.*]], i32 0
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[FNEG]], i32 1
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[B]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[C]], i32 1
; CHECK-NEXT:    [[TMP4:%.*]] = fsub <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> undef, double [[MUL]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> undef, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = fdiv <2 x double> [[TMP4]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = fcmp olt <2 x double> [[TMP7]], <double 0x3EB0C6F7A0B5ED8D, double 0x3EB0C6F7A0B5ED8D>
; CHECK-NEXT:    [[SHIFT:%.*]] = shufflevector <2 x i1> [[TMP8]], <2 x i1> undef, <2 x i32> <i32 1, i32 undef>
; CHECK-NEXT:    [[TMP9:%.*]] = and <2 x i1> [[TMP8]], [[SHIFT]]
; CHECK-NEXT:    [[OR_COND:%.*]] = extractelement <2 x i1> [[TMP9]], i64 0
; CHECK-NEXT:    br i1 [[OR_COND]], label [[CLEANUP:%.*]], label [[LOR_LHS_FALSE:%.*]]
; CHECK:       lor.lhs.false:
; CHECK-NEXT:    [[TMP10:%.*]] = fcmp ule <2 x double> [[TMP7]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[SHIFT2:%.*]] = shufflevector <2 x i1> [[TMP10]], <2 x i1> undef, <2 x i32> <i32 1, i32 undef>
; CHECK-NEXT:    [[TMP11:%.*]] = or <2 x i1> [[TMP10]], [[SHIFT2]]
; CHECK-NEXT:    [[NOT_OR_COND1:%.*]] = extractelement <2 x i1> [[TMP11]], i32 0
; CHECK-NEXT:    ret i1 [[NOT_OR_COND1]]
; CHECK:       cleanup:
; CHECK-NEXT:    ret i1 false
;
entry:
  %fneg = fneg double %b
  %add = fadd double %fneg, %c
  %mul = fmul double 2.0, %a
  %div = fdiv double %add, %mul
  ; Second chain repeats the fneg and fmul, giving redundancy to CSE.
  %fneg1 = fneg double %b
  %sub = fsub double %fneg1, %c
  %mul2 = fmul double 2.0, %a
  %div3 = fdiv double %sub, %mul2
  %cmp = fcmp olt double %div, 0x3EB0C6F7A0B5ED8D
  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false

land.lhs.true:
  %cmp4 = fcmp olt double %div3, 0x3EB0C6F7A0B5ED8D
  br i1 %cmp4, label %if.then, label %lor.lhs.false

lor.lhs.false:
  %cmp5 = fcmp ogt double %div, 1.0
  br i1 %cmp5, label %land.lhs.true6, label %if.end

land.lhs.true6:
  %cmp7 = fcmp ogt double %div3, 1.0
  br i1 %cmp7, label %if.then, label %if.end

if.then:
  br label %cleanup

if.end:
  br label %cleanup

cleanup:
  %retval.0 = phi i1 [ false, %if.then ], [ true, %if.end ]
  ret i1 %retval.0
}