; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Make sure that we can handle multiple integer induction variables.
; CHECK-LABEL: @multi_int_induction(
; CHECK: vector.body:
; CHECK:   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK:   %normalized.idx = sub i64 %index, 0
; CHECK:   %[[VAR:.*]] = trunc i64 %normalized.idx to i32
; CHECK:   %offset.idx = add i32 190, %[[VAR]]
define void @multi_int_induction(i32* %A, i32 %N) {
for.body.lr.ph:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  %count.09 = phi i32 [ 190, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %count.09, i32* %arrayidx2, align 4
  %inc = add nsw i32 %count.09, 1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp ne i32 %lftr.wideiv, %N
  br i1 %exitcond, label %for.body, label %for.end

for.end:
  ret void
}

; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -instcombine -S | FileCheck %s --check-prefix=IND

; Make sure we remove unneeded vectorization of induction variables.
; In order for instcombine to clean up the vectorized induction variables that
; we create in the loop vectorizer, we need to perform some form of redundancy
; elimination to get rid of multiple uses.

; IND-LABEL: scalar_use

; IND:     br label %vector.body
; IND:     vector.body:
;   Vectorized induction variable.
; IND-NOT:   insertelement <2 x i64>
; IND-NOT:   shufflevector <2 x i64>
; IND:     br {{.*}}, label %vector.body

define void @scalar_use(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %ind.sum = add i64 %iv, %offset
  %arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
  %l1 = load float, float* %arr.idx, align 4
  %ind.sum2 = add i64 %iv, %offset2
  %arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
  %l2 = load float, float* %arr.idx2, align 4
  %m = fmul fast float %b, %l2
  %ad = fadd fast float %l1, %m
  store float %ad, float* %arr.idx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %loopexit, label %for.body

loopexit:
  ret void
}

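; A rough C equivalent of @scalar_use above (an illustrative sketch only, not
; the source this IR was generated from; it assumes n >= 1 and fast-math):
;
;   void scalar_use(float *a, float b, long offset, long offset2, long n) {
;     for (long i = 0; i < n; ++i)
;       a[i + offset] += b * a[i + offset2];
;   }
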
; Make sure that the loop exit count computation does not overflow for i8 and
; i16. The exit count of these loops is i8/i16 max + 1. If we don't cast the
; induction variable to a bigger type the exit count computation will overflow
; to 0.
; PR17532

; CHECK-LABEL: i8_loop
; CHECK: icmp eq i32 {{.*}}, 256
define i32 @i8_loop() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i8 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i8 %b.0, -1
  %4 = icmp eq i8 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; CHECK-LABEL: i16_loop
; CHECK: icmp eq i32 {{.*}}, 65536

define i32 @i16_loop() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i16 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i16 %b.0, -1
  %4 = icmp eq i16 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; This loop has a backedge-taken count of i32_max. We need to check for this
; condition and branch directly to the scalar loop.

; CHECK-LABEL: max_i32_backedgetaken
; CHECK:  %backedge.overflow = icmp eq i32 -1, -1
; CHECK:  br i1 %backedge.overflow, label %scalar.ph, label %overflow.checked

; CHECK: scalar.ph:
; CHECK:  %bc.resume.val = phi i32 [ %resume.val, %middle.block ], [ 0, %0 ]
; CHECK:  %bc.merge.rdx = phi i32 [ 1, %0 ], [ %5, %middle.block ]

define i32 @max_i32_backedgetaken() nounwind readnone ssp uwtable {

  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i32 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i32 %b.0, -1
  %4 = icmp eq i32 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; When generating the overflow check we must make sure that the induction start
; value is defined before the branch to the scalar preheader.

; CHECK-LABEL: testoverflowcheck
; CHECK: entry
; CHECK: %[[LOAD:.*]] = load i8
; CHECK: %[[VAL:.*]] = zext i8 %[[LOAD]] to i32
; CHECK: br

; CHECK: scalar.ph
; CHECK: phi i32 [ %{{.*}}, %middle.block ], [ %[[VAL]], %entry ]

@e = global i8 1, align 1
@d = common global i32 0, align 4
@c = common global i32 0, align 4
define i32 @testoverflowcheck() {
entry:
  %.pr.i = load i8, i8* @e, align 1
  %0 = load i32, i32* @d, align 4
  %c.promoted.i = load i32, i32* @c, align 4
  br label %cond.end.i

cond.end.i:
  %inc4.i = phi i8 [ %.pr.i, %entry ], [ %inc.i, %cond.end.i ]
  %and3.i = phi i32 [ %c.promoted.i, %entry ], [ %and.i, %cond.end.i ]
  %and.i = and i32 %0, %and3.i
  %inc.i = add i8 %inc4.i, 1
  %tobool.i = icmp eq i8 %inc.i, 0
  br i1 %tobool.i, label %loopexit, label %cond.end.i

loopexit:
  ret i32 %and.i
}
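; A rough C equivalent of @testoverflowcheck above (an illustrative sketch only,
; not necessarily the source this IR came from). It shows the situation the
; comment before the function describes: the i8 induction variable starts at a
; value loaded from memory (@e), so that load must be available before the
; branch into the scalar preheader.
;
;   char e = 1;
;   int d, c;
;   int testoverflowcheck(void) {
;     int a = c;
;     unsigned char i = e;
;     do {
;       a &= d;
;       i++;
;     } while (i != 0);
;     return a;
;   }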