; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -indvars -S | FileCheck %s

target datalayout = "e-m:e-i64:64-p:64:64:64-n8:16:32:64-S128"

; When widening the IV and its users, trunc and zext/sext instructions are not
; needed if the original 32-bit user is known to be non-negative, regardless of
; whether the IV is treated as signed or unsigned.
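;
; Roughly, @foo corresponds to a C loop like the sketch below (an illustrative
; approximation, not necessarily the original source; the names follow the IR
; arguments):
;
;   void foo(int *A, int *B, int *C, int N) {
;     for (int i = 0; i < N; i++)
;       A[i] = B[i] + C[i + 2] + 5u / (unsigned)(i + 2);
;   }
;
; All of the index extensions should fold into the widened 64-bit IV; only the
; udiv operand keeps a trunc of the wide value (see the CHECK lines below).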
define void @foo(i32* %A, i32* %B, i32* %C, i32 %N) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT:    [[DIV0:%.*]] = udiv i32 5, [[TMP3]]
; CHECK-NEXT:    [[ADD4:%.*]] = add nsw i32 [[ADD3]], [[DIV0]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i32 0, %N
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = sext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %i.02, 2
  %idxprom1 = zext i32 %add to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %0, %1
  %div0 = udiv i32 5, %add
  %add4 = add nsw i32 %add3, %div0
  %idxprom4 = zext i32 %i.02 to i64
  %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
  store i32 %add4, i32* %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %N
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}

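; Same as @foo, but with the zext/sext index extensions swapped and without the
; udiv user; all of the extensions should again fold into the widened IV, so no
; trunc is expected to remain in the loop body.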
define void @foo1(i32* %A, i32* %B, i32* %C, i32 %N) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i32 0, %N
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = zext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %i.02, 2
  %idxprom1 = sext i32 %add to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %0, %1
  %idxprom4 = sext i32 %i.02 to i64
  %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
  store i32 %add3, i32* %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %N
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}


@a = common global [100 x i32] zeroinitializer, align 16
@b = common global [100 x i32] zeroinitializer, align 16

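; @foo2 mixes zext and sext users of the same IV and also stores through
; a[i + M]. The nsw add of the IV and %M should be widened using a single sext
; of %M hoisted to the preheader, leaving no trunc or per-iteration extension
; in the loop. Roughly equivalent C (an illustrative sketch only):
;
;   for (int i = 0; i < M; i++)
;     a[i + M] = a[i] + b[i];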
define i32 @foo2(i32 %M) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[M:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[M]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[TMP3]]
; CHECK-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
; CHECK-NEXT:    ret i32 0
;
entry:
  %cmp1 = icmp slt i32 0, %M
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = zext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %i.02 to i64
  %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %0, %1
  %add3 = add nsw i32 %i.02, %M
  %idxprom4 = sext i32 %add3 to i64
  %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom4
  store i32 %add, i32* %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %M
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %call = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
  ret i32 0
}

declare i32 @dummy(i32*, i32*)

; A case where the zext should not be eliminated, because its operand can only
; be extended by sext.
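; Here %add3 = %i.02 + %M carries only the nsw flag, so the widened add matches
; the sign extension of %add3, not its zero extension; the zext user therefore
; keeps a trunc of the wide value followed by a zext (see the CHECK lines below).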
define i32 @foo3(i32 %M) {
; CHECK-LABEL: @foo3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[M:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[M]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT:    [[IDXPROM4:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[IDXPROM4]]
; CHECK-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
; CHECK-NEXT:    ret i32 0
;
entry:
  %cmp1 = icmp slt i32 0, %M
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = sext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %i.02 to i64
  %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %0, %1
  %add3 = add nsw i32 %i.02, %M
  %idxprom4 = zext i32 %add3 to i64
  %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom4
  store i32 %add, i32* %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %M
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %call = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
  ret i32 0
}

%struct.image = type {i32, i32}
define i32 @foo4(%struct.image* %input, i32 %length, i32* %in) {
; CHECK-LABEL: @foo4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], %struct.image* [[INPUT:%.*]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[STRIDE]], align 4
; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
; CHECK-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CHANNEL:%.*]] = getelementptr inbounds [[STRUCT_IMAGE]], %struct.image* [[INPUT]], i64 0, i32 0
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    [[TMP2:%.*]] = phi i32 [ [[TMP10:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
; CHECK-NEXT:    ret i32 [[TMP3]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CHANNEL]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = mul nsw i64 [[TMP5]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ADD_PTR]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = mul nsw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ADD_PTR1]], align 4
; CHECK-NEXT:    [[TMP10]] = add i32 [[TMP7]], [[TMP9]]
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
  %stride = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 1
  %0 = load i32, i32* %stride, align 4
  %cmp17 = icmp sgt i32 %length, 1
  br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  %channel = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 0
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %1 = phi i32 [ %6, %for.body ]
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
  ret i32 %2

; The mul instructions below are widened instead of a truncate being generated
; for them, regardless of whether the load operand of the mul is defined inside
; or outside the loop (both cases are present here).
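; For example, "%mul = mul nsw i32 %3, %add" is expected to become a 64-bit
; multiply of the sign-extended load by the widened IV ([[TMP6]] above), and
; %mul1 to reuse the sext of %0 that is hoisted into the preheader ([[TMP1]]).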
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
  %add = add nuw nsw i32 %x.018, 1
  %3 = load i32, i32* %channel, align 8
  %mul = mul nsw i32 %3, %add
  %idx.ext = sext i32 %mul to i64
  %add.ptr = getelementptr inbounds i32, i32* %in, i64 %idx.ext
  %4 = load i32, i32* %add.ptr, align 4
  %mul1 = mul nsw i32 %0, %add
  %idx.ext1 = sext i32 %mul1 to i64
  %add.ptr1 = getelementptr inbounds i32, i32* %in, i64 %idx.ext1
  %5 = load i32, i32* %add.ptr1, align 4
  %6 = add i32 %4, %5
  %cmp = icmp slt i32 %add, %length
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}


define i32 @foo5(%struct.image* %input, i32 %length, i32* %in) {
; CHECK-LABEL: @foo5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], %struct.image* [[INPUT:%.*]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[STRIDE]], align 4
; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
; CHECK-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CHANNEL:%.*]] = getelementptr inbounds [[STRUCT_IMAGE]], %struct.image* [[INPUT]], i64 0, i32 0
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    [[TMP2:%.*]] = phi i32 [ [[TMP10:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
; CHECK-NEXT:    ret i32 [[TMP3]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CHANNEL]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ADD_PTR]], align 4
; CHECK-NEXT:    [[TMP7:%.*]] = mul nsw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[ADD_PTR1]], align 4
; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10]] = add i32 [[TMP9]], [[MUL]]
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
  %stride = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 1
  %0 = load i32, i32* %stride, align 4
  %cmp17 = icmp sgt i32 %length, 1
  br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  %channel = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 0
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %1 = phi i32 [ %7, %for.body ]
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
  ret i32 %2

; This example is the same as the one above, except that the first mul is used
; in two places, which could otherwise result in two versions of the multiply:
; an i32 and an i64 one. In this case, keep the truncate instructions to avoid
; this redundancy.
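; Consequently, the i32 %mul and the trunc of the widened IV that feeds it are
; expected to survive here (see [[TMP5]] and [[MUL]] in the CHECK lines above),
; since %mul is also used by the i32 add that produces the loop's result.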
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
  %add = add nuw nsw i32 %x.018, 1
  %3 = load i32, i32* %channel, align 8
  %mul = mul nsw i32 %3, %add
  %idx.ext = sext i32 %mul to i64
  %add.ptr = getelementptr inbounds i32, i32* %in, i64 %idx.ext
  %4 = load i32, i32* %add.ptr, align 4
  %mul1 = mul nsw i32 %0, %add
  %idx.ext1 = sext i32 %mul1 to i64
  %add.ptr1 = getelementptr inbounds i32, i32* %in, i64 %idx.ext1
  %5 = load i32, i32* %add.ptr1, align 4
  %6 = add i32 %4, %5
  %7 = add i32 %6, %mul
  %cmp = icmp slt i32 %add, %length
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}

define i32 @foo6(%struct.image* %input, i32 %length, i32* %in) {
entry:
  %stride = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 1
  %0 = load i32, i32* %stride, align 4
  %cmp17 = icmp sgt i32 %length, 1
  br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  %channel = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 0
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %1 = phi i32 [ %6, %for.body ]
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
  ret i32 %2

; Extends foo4 so that loop-variant values (%3 and %or) that are combined with
; the IV via mul/sub/add and then extended do not need a trunc instruction.
; CHECK: for.body:
; CHECK-NOT: trunc
; CHECK:      [[TMP0:%.*]] = and i32 %length, %0
; CHECK-NEXT: zext i32 [[TMP0]] to i64
; CHECK:      [[TMP1:%.*]] = or i32 %length, [[TMP2:%.*]]
; CHECK-NEXT: zext i32 [[TMP1]] to i64
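; In other words, the i32 'and'/'or' results are zero-extended once and the
; subsequent mul/sub arithmetic is expected to be carried out on the widened
; i64 IV, so no trunc of the IV survives in the loop body.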
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
  %add = add nuw nsw i32 %x.018, 1
  %3 = and i32 %length, %0
  %mul = mul nuw i32 %3, %add
  %idx.ext = zext i32 %mul to i64
  %add.ptr = getelementptr inbounds i32, i32* %in, i64 %idx.ext
  %4 = load i32, i32* %add.ptr, align 4
  %mul1 = mul nuw i32 %0, %add
  %idx.ext1 = zext i32 %mul1 to i64
  %add.ptr1 = getelementptr inbounds i32, i32* %in, i64 %idx.ext1
  %5 = load i32, i32* %add.ptr1, align 4
  %or = or i32 %length, %5
  %sub.or = sub nuw i32 %or, %add
  %or.ext = zext i32 %sub.or to i64
  %ptr.or = getelementptr inbounds i32, i32* %in, i64 %or.ext
  %val.or = load i32, i32* %ptr.or
  %6 = add i32 %4, %val.or
  %cmp = icmp ult i32 %add, %length
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
