; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; General pattern:
;   X & Y
;
; Where Y is checking that all the high bits (covered by the mask 4294967168)
; are uniform, i.e.  %arg & 4294967168  can be either  4294967168  or  0.
; The pattern can be one of:
;   %t = add        i32 %arg,    128
;   %r = icmp   ult i32 %t,      256
; Or
;   %t0 = shl       i32 %arg,    24
;   %t1 = ashr      i32 %t0,     24
;   %r  = icmp  eq  i32 %t1,     %arg
; Or
;   %t0 = trunc     i32 %arg  to i8
;   %t1 = sext      i8  %t0   to i32
;   %r  = icmp  eq  i32 %t1,     %arg
; This pattern is a signed truncation check.
;
; And X is checking that some bit in that same mask is zero.
; I.e. it can be one of:
;   %r = icmp sgt i32   %arg,    -1
; Or
;   %t = and      i32   %arg,    2147483648
;   %r = icmp eq  i32   %t,      0
;
; Since we are checking that all the bits in that mask are the same,
; and a particular bit is zero, what we are really checking is that all the
; masked bits are zero.
; So this should be transformed to:
;   %r = icmp ult i32 %arg, 128
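;
; As an illustrative sanity check (not part of the FileCheck'd tests below,
; just working the 32-bit two's complement arithmetic by hand):
;   %arg = 100  (0x00000064): %arg & 4294967168 == 0, so both X and Y hold,
;               and  icmp ult i32 100, 128  is true as well.
;   %arg = 200  (0x000000C8): %arg & 4294967168 == 128 (mixed), so Y fails
;               (200 + 128 = 328 >= 256), and  icmp ult i32 200, 128  is false.
;   %arg = -100 (0xFFFFFF9C): %arg & 4294967168 == 4294967168, so X fails
;               (signbit is set), and  icmp ult i32 -100, 128  is false
;               (as unsigned, -100 is huge).
; In each case the combined check agrees with the simplified form.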

; ============================================================================ ;
; Basic positive test
; ============================================================================ ;

define i1 @positive_with_signbit(i32 %arg) {
; CHECK-LABEL: @positive_with_signbit(
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG:%.*]], 128
; CHECK-NEXT:    ret i1 [[T4_SIMPLIFIED]]
;
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg, 128
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

define i1 @positive_with_mask(i32 %arg) {
; CHECK-LABEL: @positive_with_mask(
; CHECK-NEXT:    [[T5_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG:%.*]], 128
; CHECK-NEXT:    ret i1 [[T5_SIMPLIFIED]]
;
  %t1 = and i32 %arg, 1107296256
  %t2 = icmp eq i32 %t1, 0
  %t3 = add i32 %arg, 128
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @positive_with_icmp(i32 %arg) {
; CHECK-LABEL: @positive_with_icmp(
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG:%.*]], 128
; CHECK-NEXT:    ret i1 [[T4_SIMPLIFIED]]
;
  %t1 = icmp ult i32 %arg, 512
  %t2 = add i32 %arg, 128
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

; Still the same
define i1 @positive_with_aggressive_icmp(i32 %arg) {
; CHECK-LABEL: @positive_with_aggressive_icmp(
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG:%.*]], 128
; CHECK-NEXT:    ret i1 [[T4_SIMPLIFIED]]
;
  %t1 = icmp ult i32 %arg, 128
  %t2 = add i32 %arg, 256
  %t3 = icmp ult i32 %t2, 512
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

; I'm sure there are a bunch more possible patterns :/

; This used to trigger an assert, because the icmps are not the direct
; operands of the and.
define i1 @positive_with_extra_and(i32 %arg, i1 %z) {
; CHECK-LABEL: @positive_with_extra_and(
; CHECK-NEXT:    [[T5_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG:%.*]], 128
; CHECK-NEXT:    [[TMP1:%.*]] = and i1 [[T5_SIMPLIFIED]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg, 128
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %z
  %t5 = and i1 %t3, %t4
  ret i1 %t5
}

; ============================================================================ ;
; Vector tests
; ============================================================================ ;

define <2 x i1> @positive_vec_splat(<2 x i32> %arg) {
; CHECK-LABEL: @positive_vec_splat(
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult <2 x i32> [[ARG:%.*]], <i32 128, i32 128>
; CHECK-NEXT:    ret <2 x i1> [[T4_SIMPLIFIED]]
;
  %t1 = icmp sgt <2 x i32> %arg, <i32 -1, i32 -1>
  %t2 = add <2 x i32> %arg, <i32 128, i32 128>
  %t3 = icmp ult <2 x i32> %t2, <i32 256, i32 256>
  %t4 = and <2 x i1> %t1, %t3
  ret <2 x i1> %t4
}

define <2 x i1> @positive_vec_nonsplat(<2 x i32> %arg) {
; CHECK-LABEL: @positive_vec_nonsplat(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <2 x i32> [[ARG:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <2 x i32> [[ARG]], <i32 128, i32 256>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <2 x i32> [[T2]], <i32 256, i32 512>
; CHECK-NEXT:    [[T4:%.*]] = and <2 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <2 x i1> [[T4]]
;
  %t1 = icmp sgt <2 x i32> %arg, <i32 -1, i32 -1>
  %t2 = add <2 x i32> %arg, <i32 128, i32 256>
  %t3 = icmp ult <2 x i32> %t2, <i32 256, i32 512>
  %t4 = and <2 x i1> %t1, %t3
  ret <2 x i1> %t4
}

define <3 x i1> @positive_vec_undef0(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef0(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 128, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 256, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 128, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 256, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

define <3 x i1> @positive_vec_undef1(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef1(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 256, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 -1, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 256, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

define <3 x i1> @positive_vec_undef2(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef2(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 128, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 -1, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 128, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

define <3 x i1> @positive_vec_undef3(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef3(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 256, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 256, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

define <3 x i1> @positive_vec_undef4(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef4(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 128, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 128, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

define <3 x i1> @positive_vec_undef5(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef5(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 -1, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

define <3 x i1> @positive_vec_undef6(<3 x i32> %arg) {
; CHECK-LABEL: @positive_vec_undef6(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT:    [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
; CHECK-NEXT:    [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
; CHECK-NEXT:    [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT:    ret <3 x i1> [[T4]]
;
  %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
  %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
  %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
  %t4 = and <3 x i1> %t1, %t3
  ret <3 x i1> %t4
}

; ============================================================================ ;
; Commutativity tests.
; ============================================================================ ;

declare i32 @gen32()

define i1 @commutative() {
; CHECK-LABEL: @commutative(
; CHECK-NEXT:    [[ARG:%.*]] = call i32 @gen32()
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG]], 128
; CHECK-NEXT:    ret i1 [[T4_SIMPLIFIED]]
;
  %arg = call i32 @gen32()
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg, 128
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t3, %t1 ; swapped order
  ret i1 %t4
}

define i1 @commutative_with_icmp() {
; CHECK-LABEL: @commutative_with_icmp(
; CHECK-NEXT:    [[ARG:%.*]] = call i32 @gen32()
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG]], 128
; CHECK-NEXT:    ret i1 [[T4_SIMPLIFIED]]
;
  %arg = call i32 @gen32()
  %t1 = icmp ult i32 %arg, 512
  %t2 = add i32 %arg, 128
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t3, %t1 ; swapped order
  ret i1 %t4
}

; ============================================================================ ;
; Truncations.
; ============================================================================ ;

define i1 @positive_trunc_signbit(i32 %arg) {
; CHECK-LABEL: @positive_trunc_signbit(
; CHECK-NEXT:    [[T5_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG:%.*]], 128
; CHECK-NEXT:    ret i1 [[T5_SIMPLIFIED]]
;
  %t1 = trunc i32 %arg to i8
  %t2 = icmp sgt i8 %t1, -1
  %t3 = add i32 %arg, 128
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @positive_trunc_base(i32 %arg) {
; CHECK-LABEL: @positive_trunc_base(
; CHECK-NEXT:    [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i16
; CHECK-NEXT:    [[T5_SIMPLIFIED:%.*]] = icmp ult i16 [[T1]], 128
; CHECK-NEXT:    ret i1 [[T5_SIMPLIFIED]]
;
  %t1 = trunc i32 %arg to i16
  %t2 = icmp sgt i16 %t1, -1
  %t3 = add i16 %t1, 128
  %t4 = icmp ult i16 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @positive_different_trunc_both(i32 %arg) {
; CHECK-LABEL: @positive_different_trunc_both(
; CHECK-NEXT:    [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i15
; CHECK-NEXT:    [[T2:%.*]] = icmp sgt i15 [[T1]], -1
; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[ARG]] to i16
; CHECK-NEXT:    [[T4:%.*]] = add i16 [[T3]], 128
; CHECK-NEXT:    [[T5:%.*]] = icmp ult i16 [[T4]], 256
; CHECK-NEXT:    [[T6:%.*]] = and i1 [[T2]], [[T5]]
; CHECK-NEXT:    ret i1 [[T6]]
;
  %t1 = trunc i32 %arg to i15
  %t2 = icmp sgt i15 %t1, -1
  %t3 = trunc i32 %arg to i16
  %t4 = add i16 %t3, 128
  %t5 = icmp ult i16 %t4, 256
  %t6 = and i1 %t2, %t5
  ret i1 %t6
}

; ============================================================================ ;
; One-use tests.
;
; We will only produce one instruction, so we do not care about one-use.
; But we *could* handle more patterns that we weren't able to canonicalize
; because of extra uses.
; ============================================================================ ;

declare void @use32(i32)
declare void @use8(i8)
declare void @use1(i1)

define i1 @oneuse_with_signbit(i32 %arg) {
; CHECK-LABEL: @oneuse_with_signbit(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt i32 [[ARG:%.*]], -1
; CHECK-NEXT:    call void @use1(i1 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[ARG]], 128
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 256
; CHECK-NEXT:    call void @use1(i1 [[T3]])
; CHECK-NEXT:    [[T4_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG]], 128
; CHECK-NEXT:    ret i1 [[T4_SIMPLIFIED]]
;
  %t1 = icmp sgt i32 %arg, -1
  call void @use1(i1 %t1)
  %t2 = add i32 %arg, 128
  call void @use32(i32 %t2)
  %t3 = icmp ult i32 %t2, 256
  call void @use1(i1 %t3)
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

define i1 @oneuse_with_mask(i32 %arg) {
; CHECK-LABEL: @oneuse_with_mask(
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[ARG:%.*]], 603979776
; CHECK-NEXT:    call void @use32(i32 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    call void @use1(i1 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG]], 128
; CHECK-NEXT:    call void @use32(i32 [[T3]])
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    call void @use1(i1 [[T4]])
; CHECK-NEXT:    [[T5_SIMPLIFIED:%.*]] = icmp ult i32 [[ARG]], 128
; CHECK-NEXT:    ret i1 [[T5_SIMPLIFIED]]
;
  %t1 = and i32 %arg, 603979776 ; some bit within the target 4294967168 mask.
  call void @use32(i32 %t1)
  %t2 = icmp eq i32 %t1, 0
  call void @use1(i1 %t2)
  %t3 = add i32 %arg, 128
  call void @use32(i32 %t3)
  %t4 = icmp ult i32 %t3, 256
  call void @use1(i1 %t4)
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @oneuse_shl_ashr(i32 %arg) {
; CHECK-LABEL: @oneuse_shl_ashr(
; CHECK-NEXT:    [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i8
; CHECK-NEXT:    call void @use8(i8 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = icmp sgt i8 [[T1]], -1
; CHECK-NEXT:    call void @use1(i1 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[ARG]], 24
; CHECK-NEXT:    call void @use32(i32 [[T3]])
; CHECK-NEXT:    [[T4:%.*]] = ashr exact i32 [[T3]], 24
; CHECK-NEXT:    call void @use32(i32 [[T4]])
; CHECK-NEXT:    [[T5:%.*]] = icmp eq i32 [[T4]], [[ARG]]
; CHECK-NEXT:    call void @use1(i1 [[T5]])
; CHECK-NEXT:    [[T6:%.*]] = and i1 [[T2]], [[T5]]
; CHECK-NEXT:    ret i1 [[T6]]
;
  %t1 = trunc i32 %arg to i8
  call void @use8(i8 %t1)
  %t2 = icmp sgt i8 %t1, -1
  call void @use1(i1 %t2)
  %t3 = shl i32 %arg, 24
  call void @use32(i32 %t3)
  %t4 = ashr i32 %t3, 24
  call void @use32(i32 %t4)
  %t5 = icmp eq i32 %t4, %arg
  call void @use1(i1 %t5)
  %t6 = and i1 %t2, %t5
  ret i1 %t6
}

define zeroext i1 @oneuse_trunc_sext(i32 %arg) {
; CHECK-LABEL: @oneuse_trunc_sext(
; CHECK-NEXT:    [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i8
; CHECK-NEXT:    call void @use8(i8 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = icmp sgt i8 [[T1]], -1
; CHECK-NEXT:    call void @use1(i1 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[ARG]] to i8
; CHECK-NEXT:    call void @use8(i8 [[T3]])
; CHECK-NEXT:    [[T4:%.*]] = sext i8 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T4]])
; CHECK-NEXT:    [[T5:%.*]] = icmp eq i32 [[T4]], [[ARG]]
; CHECK-NEXT:    call void @use1(i1 [[T5]])
; CHECK-NEXT:    [[T6:%.*]] = and i1 [[T2]], [[T5]]
; CHECK-NEXT:    ret i1 [[T6]]
;
  %t1 = trunc i32 %arg to i8
  call void @use8(i8 %t1)
  %t2 = icmp sgt i8 %t1, -1
  call void @use1(i1 %t2)
  %t3 = trunc i32 %arg to i8
  call void @use8(i8 %t3)
  %t4 = sext i8 %t3 to i32
  call void @use32(i32 %t4)
  %t5 = icmp eq i32 %t4, %arg
  call void @use1(i1 %t5)
  %t6 = and i1 %t2, %t5
  ret i1 %t6
}

; ============================================================================ ;
; Negative tests
; ============================================================================ ;

define i1 @negative_not_arg(i32 %arg, i32 %arg2) {
; CHECK-LABEL: @negative_not_arg(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt i32 [[ARG:%.*]], -1
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[ARG2:%.*]], 128
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 256
; CHECK-NEXT:    [[T4:%.*]] = and i1 [[T1]], [[T3]]
; CHECK-NEXT:    ret i1 [[T4]]
;
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg2, 128 ; not %arg
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

define i1 @negative_trunc_not_arg(i32 %arg, i32 %arg2) {
; CHECK-LABEL: @negative_trunc_not_arg(
; CHECK-NEXT:    [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i8
; CHECK-NEXT:    [[T2:%.*]] = icmp sgt i8 [[T1]], -1
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG2:%.*]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = trunc i32 %arg to i8
  %t2 = icmp sgt i8 %t1, -1
  %t3 = add i32 %arg2, 128 ; not %arg
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @positive_with_mask_not_arg(i32 %arg, i32 %arg2) {
; CHECK-LABEL: @positive_with_mask_not_arg(
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[ARG:%.*]], 1140850688
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG2:%.*]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = and i32 %arg, 1140850688
  %t2 = icmp eq i32 %t1, 0
  %t3 = add i32 %arg2, 128 ; not %arg
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @negative_with_nonuniform_bad_mask(i32 %arg) {
; CHECK-LABEL: @negative_with_nonuniform_bad_mask(
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[ARG:%.*]], 1711276033
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = and i32 %arg, 1711276033 ; lowest bit is set
  %t2 = icmp eq i32 %t1, 0
  %t3 = add i32 %arg, 128
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @negative_with_uniform_bad_mask(i32 %arg) {
; CHECK-LABEL: @negative_with_uniform_bad_mask(
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[ARG:%.*]], -16777152
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = and i32 %arg, 4278190144 ; bit 6 (value 64) is set, i.e. a bit below the 4294967168 mask
  %t2 = icmp eq i32 %t1, 0
  %t3 = add i32 %arg, 128
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @negative_with_wrong_mask(i32 %arg) {
; CHECK-LABEL: @negative_with_wrong_mask(
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[ARG:%.*]], 1
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = and i32 %arg, 1 ; not even checking the right mask
  %t2 = icmp eq i32 %t1, 0
  %t3 = add i32 %arg, 128
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @negative_not_less_than(i32 %arg) {
; CHECK-LABEL: @negative_not_less_than(
; CHECK-NEXT:    ret i1 false
;
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg, 256 ; should be less than 256
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

define i1 @negative_not_power_of_two(i32 %arg) {
; CHECK-LABEL: @negative_not_power_of_two(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt i32 [[ARG:%.*]], -1
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[ARG]], 255
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 256
; CHECK-NEXT:    [[T4:%.*]] = and i1 [[T1]], [[T3]]
; CHECK-NEXT:    ret i1 [[T4]]
;
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg, 255 ; should be a power of two
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

define i1 @negative_not_next_power_of_two(i32 %arg) {
; CHECK-LABEL: @negative_not_next_power_of_two(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt i32 [[ARG:%.*]], -1
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[ARG]], 64
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 256
; CHECK-NEXT:    [[T4:%.*]] = and i1 [[T1]], [[T3]]
; CHECK-NEXT:    ret i1 [[T4]]
;
  %t1 = icmp sgt i32 %arg, -1
  %t2 = add i32 %arg, 64 ; should be 256 >> 1
  %t3 = icmp ult i32 %t2, 256
  %t4 = and i1 %t1, %t3
  ret i1 %t4
}

; I don't think this can be folded, at least not into a single instruction.
define i1 @two_signed_truncation_checks(i32 %arg) {
; CHECK-LABEL: @two_signed_truncation_checks(
; CHECK-NEXT:    [[T1:%.*]] = add i32 [[ARG:%.*]], 512
; CHECK-NEXT:    [[T2:%.*]] = icmp ult i32 [[T1]], 1024
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[ARG]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = add i32 %arg, 512
  %t2 = icmp ult i32 %t1, 1024
  %t3 = add i32 %arg, 128
  %t4 = icmp ult i32 %t3, 256
  %t5 = and i1 %t2, %t4
  ret i1 %t5
}

define i1 @bad_trunc_stc(i32 %arg) {
; CHECK-LABEL: @bad_trunc_stc(
; CHECK-NEXT:    [[T1:%.*]] = icmp sgt i32 [[ARG:%.*]], -1
; CHECK-NEXT:    [[T2:%.*]] = trunc i32 [[ARG]] to i16
; CHECK-NEXT:    [[T3:%.*]] = add i16 [[T2]], 128
; CHECK-NEXT:    [[T4:%.*]] = icmp ult i16 [[T3]], 256
; CHECK-NEXT:    [[T5:%.*]] = and i1 [[T1]], [[T4]]
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t1 = icmp sgt i32 %arg, -1 ; checks a bit outside of the i16
  %t2 = trunc i32 %arg to i16
  %t3 = add i16 %t2, 128
  %t4 = icmp ult i16 %t3, 256
  %t5 = and i1 %t1, %t4
  ret i1 %t5
}