; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -force-vector-width=4 -S < %s | FileCheck %s

; This is the test case from PR26314.
; When dependence checking was retried with memchecks only, the loop-invariant
; access in the inner loop was incorrectly determined to be wrapping, because
; it is not strided in the inner loop.
; Improved wrapping detection allows vectorization of the following case.

; #define Z 32
; typedef struct s {
;       int v1[Z];
;       int v2[Z];
;       int v3[Z][Z];
; } s;
;
; void slow_function (s* const obj, int z) {
;    for (int j=0; j<Z; j++) {
;        for (int k=0; k<z; k++) {
;            int x = obj->v1[k] + obj->v2[j];
;            obj->v3[j][k] += x;
;        }
;    }
; }
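;
; In the IR below, the outer induction %i plays the role of j and the inner
; induction %j plays the role of k. %1 (= &obj->v2[i]) is the address of the
; inner-loop-invariant access; the vector.memcheck block compares the v3[i]
; row written by the loop against both the v1 range and that single v2
; element before taking the vector path.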

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

%struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] }

define void @Test(%struct.s* nocapture %obj, i64 %z) {
; CHECK-LABEL: @Test(
; CHECK-NEXT:    [[OBJ4:%.*]] = bitcast %struct.s* [[OBJ:%.*]] to i8*
; CHECK-NEXT:    [[SCEVGEP5:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.s* [[OBJ]], i64 0, i32 0, i64 [[Z:%.*]]
; CHECK-NEXT:    [[SCEVGEP56:%.*]] = bitcast i32* [[SCEVGEP5]] to i8*
; CHECK-NEXT:    br label [[DOTOUTER_PREHEADER:%.*]]
; CHECK:       .outer.preheader:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[I_NEXT:%.*]], [[DOTOUTER:%.*]] ]
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 0
; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
; CHECK-NEXT:    [[SCEVGEP2:%.*]] = getelementptr [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[Z]]
; CHECK-NEXT:    [[SCEVGEP23:%.*]] = bitcast i32* [[SCEVGEP2]] to i8*
; CHECK-NEXT:    [[SCEVGEP7:%.*]] = getelementptr [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 1, i64 [[I]]
; CHECK-NEXT:    [[SCEVGEP78:%.*]] = bitcast i32* [[SCEVGEP7]] to i8*
; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[SCEVGEP78]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 1, i64 [[I]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[Z]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[SCEVGEP1]], [[SCEVGEP56]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[OBJ4]], [[SCEVGEP23]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    [[BC:%.*]] = bitcast i32* [[TMP1]] to i8*
; CHECK-NEXT:    [[BOUND09:%.*]] = icmp ult i8* [[SCEVGEP1]], [[UGLYGEP]]
; CHECK-NEXT:    [[BOUND110:%.*]] = icmp ult i8* [[BC]], [[SCEVGEP23]]
; CHECK-NEXT:    [[FOUND_CONFLICT11:%.*]] = and i1 [[BOUND09]], [[BOUND110]]
; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT11]]
; CHECK-NEXT:    [[MEMCHECK_CONFLICT:%.*]] = and i1 [[CONFLICT_RDX]], true
; CHECK-NEXT:    br i1 [[MEMCHECK_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[Z]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[Z]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 0, i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP5]], align 4, !alias.scope !0
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD12:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4, !alias.scope !5, !noalias !7
; CHECK-NEXT:    [[TMP11:%.*]] = add nsw <4 x i32> [[TMP7]], [[WIDE_LOAD12]]
; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP11]], <4 x i32>* [[TMP12]], align 4, !alias.scope !5, !noalias !7
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP8:!llvm.loop !.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[Z]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[DOTOUTER]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[DOTOUTER_PREHEADER]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[DOTINNER:%.*]]
; CHECK:       .exit:
; CHECK-NEXT:    ret void
; CHECK:       .outer:
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[EXITCOND_OUTER:%.*]] = icmp eq i64 [[I_NEXT]], 32
; CHECK-NEXT:    br i1 [[EXITCOND_OUTER]], label [[DOTEXIT:%.*]], label [[DOTOUTER_PREHEADER]]
; CHECK:       .inner:
; CHECK-NEXT:    [[J:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[DOTINNER]] ]
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 0, i64 [[J]]
; CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
; CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP17:%.*]] = add nsw i32 [[TMP16]], [[TMP15]]
; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[J]]
; CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
; CHECK-NEXT:    [[TMP20:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
; CHECK-NEXT:    store i32 [[TMP20]], i32* [[TMP18]], align 4
; CHECK-NEXT:    [[J_NEXT]] = add nuw nsw i64 [[J]], 1
; CHECK-NEXT:    [[EXITCOND_INNER:%.*]] = icmp eq i64 [[J_NEXT]], [[Z]]
; CHECK-NEXT:    br i1 [[EXITCOND_INNER]], label [[DOTOUTER]], label [[DOTINNER]], [[LOOP10:!llvm.loop !.*]]
;
  br label %.outer.preheader

.outer.preheader:
  %i = phi i64 [ 0, %0 ], [ %i.next, %.outer ]
  %1 = getelementptr inbounds %struct.s, %struct.s* %obj, i64 0, i32 1, i64 %i
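  ; %1 above (&obj->v2[i]) is invariant in the inner loop; this is the access
  ; that was previously mis-classified as wrapping (see the note at the top).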
  br label %.inner

.exit:
  ret void

.outer:
  %i.next = add nuw nsw i64 %i, 1
  %exitcond.outer = icmp eq i64 %i.next, 32
  br i1 %exitcond.outer, label %.exit, label %.outer.preheader

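; The inner loop loads v1[j] and the invariant v2[i] through %1, and
; accumulates their sum into v3[i][j].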
.inner:
  %j = phi i64 [ 0, %.outer.preheader ], [ %j.next, %.inner ]
  %2 = getelementptr inbounds %struct.s, %struct.s* %obj, i64 0, i32 0, i64 %j
  %3 = load i32, i32* %2
  %4 = load i32, i32* %1
  %5 = add nsw i32 %4, %3
  %6 = getelementptr inbounds %struct.s, %struct.s* %obj, i64 0, i32 2, i64 %i, i64 %j
  %7 = load i32, i32* %6
  %8 = add nsw i32 %5, %7
  store i32 %8, i32* %6
  %j.next = add nuw nsw i64 %j, 1
  %exitcond.inner = icmp eq i64 %j.next, %z
  br i1 %exitcond.inner, label %.outer, label %.inner
}