; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; PR39417
; Check that the need for overflow check prevents vectorizing a loop with tiny
; trip count (which implies opt for size).
; CHECK-LABEL: @func_34
; CHECK-NOT: vector.scevcheck
; CHECK-NOT: vector.body:
; CHECK-LABEL: bb67:
define void @func_34() {
bb1:
  br label %bb67

bb67:
  %storemerge2 = phi i32 [ 0, %bb1 ], [ %_tmp2300, %bb67 ]
  %sext = shl i32 %storemerge2, 16
  %_tmp2299 = ashr exact i32 %sext, 16
  %_tmp2300 = add nsw i32 %_tmp2299, 1
  %_tmp2310 = trunc i32 %_tmp2300 to i16
  %_tmp2312 = icmp slt i16 %_tmp2310, 3
  br i1 %_tmp2312, label %bb67, label %bb68

bb68:
  ret void
}

; Check that a loop under opt-for-size is vectorized, w/o checking for
; stride==1.
; NOTE: Some assertions have been autogenerated by utils/update_test_checks.py
define void @scev4stride1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %k) #0 {
; CHECK-LABEL: @scev4stride1(
; CHECK-NEXT:  for.body.preheader:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[K:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[INDEX]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x i32> [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x i32> [[TMP4]], i32 1
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x i32> [[TMP4]], i32 2
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x i32> [[TMP4]], i32 3
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[TMP11]]
; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP6]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP8]], align 4
; CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP12]], align 4
; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <4 x i32> undef, i32 [[TMP13]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP14]], i32 1
; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP15]], i32 2
; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP16]], i32 3
; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[TMP0]]
; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 0
; CHECK-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP20]], <4 x i32>* [[TMP23]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP24:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
; CHECK-NEXT:    br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 1024, 1024
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK:       for.body:
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    ret void
;
for.body.preheader:
  br label %for.body

for.body:
  %i.07 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
  %mul = mul nsw i32 %i.07, %k
  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %mul
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 %i.07
  store i32 %0, i32* %arrayidx1, align 4
  %inc = add nuw nsw i32 %i.07, 1
  %exitcond = icmp eq i32 %inc, 1024
  br i1 %exitcond, label %for.end.loopexit, label %for.body

for.end.loopexit:
  ret void
}

attributes #0 = { optsize }