; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; The four consecutive loads, the mul/add chains, and the four consecutive
; stores in this loop should be SLP-vectorized into single <4 x i32>
; operations (see the CHECK lines below).
define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
; CHECK-LABEL: @rollable(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP10:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <4 x i32>*
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> [[TMP5]], <i32 7, i32 7, i32 7, i32 7>
; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP6]], <i32 7, i32 14, i32 21, i32 28>
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[TMP10]] = add i64 [[I_019]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP10]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp eq i64 %n, 0
  br i1 %1, label %._crit_edge, label %.lr.ph

.lr.ph:                                           ; preds = %0, %.lr.ph
  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
  %2 = shl i64 %i.019, 2
  %3 = getelementptr inbounds i32, i32* %in, i64 %2
  %4 = load i32, i32* %3, align 4
  %5 = or i64 %2, 1
  %6 = getelementptr inbounds i32, i32* %in, i64 %5
  %7 = load i32, i32* %6, align 4
  %8 = or i64 %2, 2
  %9 = getelementptr inbounds i32, i32* %in, i64 %8
  %10 = load i32, i32* %9, align 4
  %11 = or i64 %2, 3
  %12 = getelementptr inbounds i32, i32* %in, i64 %11
  %13 = load i32, i32* %12, align 4
  %14 = mul i32 %4, 7
  %15 = add i32 %14, 7
  %16 = mul i32 %7, 7
  %17 = add i32 %16, 14
  %18 = mul i32 %10, 7
  %19 = add i32 %18, 21
  %20 = mul i32 %13, 7
  %21 = add i32 %20, 28
  %22 = getelementptr inbounds i32, i32* %out, i64 %2
  store i32 %15, i32* %22, align 4
  %23 = getelementptr inbounds i32, i32* %out, i64 %5
  store i32 %17, i32* %23, align 4
  %24 = getelementptr inbounds i32, i32* %out, i64 %8
  store i32 %19, i32* %24, align 4
  %25 = getelementptr inbounds i32, i32* %out, i64 %11
  store i32 %21, i32* %25, align 4
  %26 = add i64 %i.019, 1
  %exitcond = icmp eq i64 %26, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

; Same loop body as @rollable, but the call to @goo between the stores acts
; as a memory barrier, so the loads/stores must stay scalar (the CHECK lines
; assert no <4 x i32> operations are formed).
define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
; CHECK-LABEL: @unrollable(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP26:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = or i64 [[TMP2]], 2
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
; CHECK-NEXT:    [[TMP11:%.*]] = or i64 [[TMP2]], 3
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP11]]
; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP4]], 7
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], 7
; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP7]], 7
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP16]], 14
; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP10]], 7
; CHECK-NEXT:    [[TMP19:%.*]] = add i32 [[TMP18]], 21
; CHECK-NEXT:    [[TMP20:%.*]] = mul i32 [[TMP13]], 7
; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP20]], 28
; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    store i32 [[TMP15]], i32* [[TMP22]], align 4
; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP5]]
; CHECK-NEXT:    store i32 [[TMP17]], i32* [[TMP23]], align 4
; CHECK-NEXT:    [[BARRIER:%.*]] = call i32 @goo(i32 0)
; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP8]]
; CHECK-NEXT:    store i32 [[TMP19]], i32* [[TMP24]], align 4
; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP11]]
; CHECK-NEXT:    store i32 [[TMP21]], i32* [[TMP25]], align 4
; CHECK-NEXT:    [[TMP26]] = add i64 [[I_019]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP26]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp eq i64 %n, 0
  br i1 %1, label %._crit_edge, label %.lr.ph

.lr.ph:                                           ; preds = %0, %.lr.ph
  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
  %2 = shl i64 %i.019, 2
  %3 = getelementptr inbounds i32, i32* %in, i64 %2
  %4 = load i32, i32* %3, align 4
  %5 = or i64 %2, 1
  %6 = getelementptr inbounds i32, i32* %in, i64 %5
  %7 = load i32, i32* %6, align 4
  %8 = or i64 %2, 2
  %9 = getelementptr inbounds i32, i32* %in, i64 %8
  %10 = load i32, i32* %9, align 4
  %11 = or i64 %2, 3
  %12 = getelementptr inbounds i32, i32* %in, i64 %11
  %13 = load i32, i32* %12, align 4
  %14 = mul i32 %4, 7
  %15 = add i32 %14, 7
  %16 = mul i32 %7, 7
  %17 = add i32 %16, 14
  %18 = mul i32 %10, 7
  %19 = add i32 %18, 21
  %20 = mul i32 %13, 7
  %21 = add i32 %20, 28
  %22 = getelementptr inbounds i32, i32* %out, i64 %2
  store i32 %15, i32* %22, align 4
  %23 = getelementptr inbounds i32, i32* %out, i64 %5
  store i32 %17, i32* %23, align 4
  %barrier = call i32 @goo(i32 0)                      ; <---------------- memory barrier.
  %24 = getelementptr inbounds i32, i32* %out, i64 %8
  store i32 %19, i32* %24, align 4
  %25 = getelementptr inbounds i32, i32* %out, i64 %11
  store i32 %21, i32* %25, align 4
  %26 = add i64 %i.019, 1
  %exitcond = icmp eq i64 %26, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

; External function with unknown side effects; its call site in @unrollable
; is what prevents vectorization there.
declare i32 @goo(i32)