; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; Given pattern:
;   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
; we should move shifts to the same hand of 'and', i.e. rewrite as
;   icmp eq/ne (and (((x shift Q) shift K), y)), 0
; We are only interested in opposite logical shifts here.
; We can still handle the case where there is a truncation between a shift
; and an 'and'. If it's a trunc-of-shl, no extra legality check is needed.
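;
; For instance, with Q == 1 and K == 1 (logical shifts, compare against 0):
;   ((x l>> 1) & (y << 1)) != 0   ->   (((x l>> 1) l>> 1) & y) != 0
; and the two right-shifts then fold into a single 'x l>> 2'.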

;-------------------------------------------------------------------------------
; Basic scalar tests
;-------------------------------------------------------------------------------

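; The reassociated shift amount here is (32 - %len) + (%len - 1) == 31,
; so the bittest folds down to '(zext(%x l>> 31) & %y) != 0'.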
define i1 @t0_const_after_fold_lshr_shl_ne(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t0_const_after_fold_lshr_shl_ne(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP2]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT:    ret i1 [[TMP4]]
;
  %t0 = sub i32 32, %len
  %t1 = lshr i32 %x, %t0
  %t2 = add i32 %len, -1
  %t2_wide = zext i32 %t2 to i64
  %t3 = shl i64 %y, %t2_wide
  %t3_trunc = trunc i64 %t3 to i32
  %t4 = and i32 %t1, %t3_trunc
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

;-------------------------------------------------------------------------------
; Very basic vector tests
;-------------------------------------------------------------------------------

define <2 x i1> @t1_vec_splat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
; CHECK-LABEL: @t1_vec_splat(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 31, i32 31>
; CHECK-NEXT:    [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[TMP4]]
;
  %t0 = sub <2 x i32> <i32 32, i32 32>, %len
  %t1 = lshr <2 x i32> %x, %t0
  %t2 = add <2 x i32> %len, <i32 -1, i32 -1>
  %t2_wide = zext <2 x i32> %t2 to <2 x i64>
  %t3 = shl <2 x i64> %y, %t2_wide
  %t3_trunc = trunc <2 x i64> %t3 to <2 x i32>
  %t4 = and <2 x i32> %t1, %t3_trunc
  %t5 = icmp ne <2 x i32> %t4, <i32 0, i32 0>
  ret <2 x i1> %t5
}

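; The per-lane reassociated amounts are (30 - %len) + (%len + 1) == 31 and
; (32 - %len) + (%len - 2) == 30, hence the <i64 31, i64 30> shift amounts.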
define <2 x i1> @t2_vec_nonsplat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
; CHECK-LABEL: @t2_vec_nonsplat(
; CHECK-NEXT:    [[TMP1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], <i64 31, i64 30>
; CHECK-NEXT:    [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[TMP4]]
;
  %t0 = sub <2 x i32> <i32 30, i32 32>, %len
  %t1 = lshr <2 x i32> %x, %t0
  %t2 = add <2 x i32> %len, <i32 1, i32 -2>
  %t2_wide = zext <2 x i32> %t2 to <2 x i64>
  %t3 = shl <2 x i64> %y, %t2_wide
  %t3_trunc = trunc <2 x i64> %t3 to <2 x i32>
  %t4 = and <2 x i32> %t1, %t3_trunc
  %t5 = icmp ne <2 x i32> %t4, <i32 0, i32 0>
  ret <2 x i1> %t5
}

;-------------------------------------------------------------------------------
; Commutativity tests
;-------------------------------------------------------------------------------

declare i32 @gen32()
declare i64 @gen64()

; While 'and' is commutative, the 'trunc' *always* seems to get
; canonicalized to the RHS; it does not seem possible to prevent that.

;-------------------------------------------------------------------------------
; One-use tests
;-------------------------------------------------------------------------------

declare void @use32(i32)
declare void @use64(i64)

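; The fold creates new instructions, so presumably it is only worthwhile when
; enough of the original instructions become dead; the tests below probe
; which extra uses block it.
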
; Nope, everything has extra uses.
define i1 @t3_oneuse0(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t3_oneuse0(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[T0]])
; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
; CHECK-NEXT:    call void @use32(i32 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
; CHECK-NEXT:    call void @use32(i32 [[T4]])
; CHECK-NEXT:    [[T5:%.*]] = icmp ne i32 [[T4]], 0
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t0 = sub i32 32, %len
  call void @use32(i32 %t0)
  %t1 = lshr i32 %x, %t0
  call void @use32(i32 %t1)
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 %y, %t2_wide
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32
  call void @use32(i32 %t3_trunc)
  %t4 = and i32 %t1, %t3_trunc
  call void @use32(i32 %t4)
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Nope, still too many extra uses.
define i1 @t4_oneuse1(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t4_oneuse1(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[T0]])
; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
; CHECK-NEXT:    call void @use32(i32 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
; CHECK-NEXT:    [[T5:%.*]] = icmp ne i32 [[T4]], 0
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t0 = sub i32 32, %len
  call void @use32(i32 %t0)
  %t1 = lshr i32 %x, %t0
  call void @use32(i32 %t1)
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 %y, %t2_wide
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32
  call void @use32(i32 %t3_trunc)
  %t4 = and i32 %t1, %t3_trunc ; no extra uses
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Still too many extra uses.
define i1 @t5_oneuse2(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t5_oneuse2(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[T0]])
; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
; CHECK-NEXT:    [[T5:%.*]] = icmp ne i32 [[T4]], 0
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t0 = sub i32 32, %len
  call void @use32(i32 %t0)
  %t1 = lshr i32 %x, %t0 ; no extra uses
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 %y, %t2_wide
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32
  call void @use32(i32 %t3_trunc)
  %t4 = and i32 %t1, %t3_trunc ; no extra uses
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Ok, trunc has no extra uses.
define i1 @t6_oneuse3(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t6_oneuse3(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[T0]])
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT:    ret i1 [[TMP4]]
;
  %t0 = sub i32 32, %len
  call void @use32(i32 %t0)
  %t1 = lshr i32 %x, %t0 ; no extra uses
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 %y, %t2_wide
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32 ; no extra uses
  %t4 = and i32 %t1, %t3_trunc ; no extra uses
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Ok, the shift amount of the non-truncated shift has no extra uses.
define i1 @t7_oneuse4(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t7_oneuse4(
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN:%.*]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT:    ret i1 [[TMP4]]
;
  %t0 = sub i32 32, %len ; no extra uses
  %t1 = lshr i32 %x, %t0 ; no extra uses
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 %y, %t2_wide
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32
  call void @use32(i32 %t3_trunc)
  %t4 = and i32 %t1, %t3_trunc ; no extra uses
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Ok, the non-truncated shift is of a constant.
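; (Note: 4242424242 wraps to -52543054 as an i32; logically shifting it
; right by the reassociated amount of 31 leaves 1, hence the 'and i64 %y, 1'.)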
define i1 @t8_oneuse5(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t8_oneuse5(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[T0]])
; CHECK-NEXT:    [[T1:%.*]] = lshr i32 -52543054, [[T0]]
; CHECK-NEXT:    call void @use32(i32 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[Y]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %t0 = sub i32 32, %len
  call void @use32(i32 %t0)
  %t1 = lshr i32 4242424242, %t0 ; shift-of-constant
  call void @use32(i32 %t1)
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 %y, %t2_wide
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32
  call void @use32(i32 %t3_trunc)
  %t4 = and i32 %t1, %t3_trunc ; no extra uses
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Ok, the truncated shift is of a constant.
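; (The reassociated bittest ands 'zext(%x l>> 31)' with the constant, and
; since 4242424242 has its low bit clear, the whole compare folds to false.)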
define i1 @t9_oneuse6(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t9_oneuse6(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[T0]])
; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
; CHECK-NEXT:    call void @use32(i32 [[T1]])
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    call void @use64(i64 [[T2_WIDE]])
; CHECK-NEXT:    [[T3:%.*]] = shl i64 4242424242, [[T2_WIDE]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT:    ret i1 false
;
  %t0 = sub i32 32, %len
  call void @use32(i32 %t0)
  %t1 = lshr i32 %x, %t0
  call void @use32(i32 %t1)
  %t2 = add i32 %len, -1
  call void @use32(i32 %t2)
  %t2_wide = zext i32 %t2 to i64
  call void @use64(i64 %t2_wide)
  %t3 = shl i64 4242424242, %t2_wide ; shift-of-constant
  call void @use64(i64 %t3)
  %t3_trunc = trunc i64 %t3 to i32
  call void @use32(i32 %t3_trunc)
  %t4 = and i32 %t1, %t3_trunc ; no extra uses
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

;-------------------------------------------------------------------------------
; Commutativity with extra uses
;-------------------------------------------------------------------------------

; While 'and' is commutative, the 'trunc' *always* seems to get
; canonicalized to the RHS; it does not seem possible to prevent that.

;-------------------------------------------------------------------------------
; Constant shift amounts
;-------------------------------------------------------------------------------

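; Here the reassociated shift amount is simply 12 + 14 == 26.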
define i1 @t10_constants(i32 %x, i64 %y) {
; CHECK-LABEL: @t10_constants(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc i64 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 26
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[Y_TR]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = lshr i32 %x, 12
  %t1 = shl i64 %y, 14
  %t1_trunc = trunc i64 %t1 to i32
  %t2 = and i32 %t0, %t1_trunc
  %t3 = icmp ne i32 %t2, 0
  ret i1 %t3
}

define <2 x i1> @t11_constants_vec_splat(<2 x i32> %x, <2 x i64> %y) {
; CHECK-LABEL: @t11_constants_vec_splat(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc <2 x i64> [[Y:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 26, i32 26>
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[Y_TR]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[TMP3]]
;
  %t0 = lshr <2 x i32> %x, <i32 12, i32 12>
  %t1 = shl <2 x i64> %y, <i64 14, i64 14>
  %t1_trunc = trunc <2 x i64> %t1 to <2 x i32>
  %t2 = and <2 x i32> %t0, %t1_trunc
  %t3 = icmp ne <2 x i32> %t2, <i32 0, i32 0>
  ret <2 x i1> %t3
}
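
; The per-lane sums 12 + 16 and 14 + 14 both equal 28, so these nonsplat
; shift amounts still reassociate, into a splat shift by 28.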
define <2 x i1> @t12_constants_vec_nonsplat(<2 x i32> %x, <2 x i64> %y) {
; CHECK-LABEL: @t12_constants_vec_nonsplat(
; CHECK-NEXT:    [[Y_TR:%.*]] = trunc <2 x i64> [[Y:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 28, i32 28>
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[Y_TR]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[TMP3]]
;
  %t0 = lshr <2 x i32> %x, <i32 12, i32 14>
  %t1 = shl <2 x i64> %y, <i64 16, i64 14>
  %t1_trunc = trunc <2 x i64> %t1 to <2 x i32>
  %t2 = and <2 x i32> %t0, %t1_trunc
  %t3 = icmp ne <2 x i32> %t2, <i32 0, i32 0>
  ret <2 x i1> %t3
}

;-------------------------------------------------------------------------------
; Negative tests
;-------------------------------------------------------------------------------

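; The combined shift amount here is (32 - %len) + (%len + 32) == 64, which
; would over-shift the i32 side, so the fold must not fire.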
define i1 @n13_overshift(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @n13_overshift(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], 32
; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
; CHECK-NEXT:    [[T5:%.*]] = icmp ne i32 [[T4]], 0
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t0 = sub i32 32, %len
  %t1 = lshr i32 %x, %t0
  %t2 = add i32 %len, 32 ; too much
  %t2_wide = zext i32 %t2 to i64
  %t3 = shl i64 %y, %t2_wide
  %t3_trunc = trunc i64 %t3 to i32
  %t4 = and i32 %t1, %t3_trunc
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}
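
; As the header notes, only trunc-of-shl needs no extra legality check;
; here the truncation is of a lshr, so the fold is not performed as-is.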
define i1 @n14_trunc_of_lshr(i64 %x, i32 %y, i32 %len) {
; CHECK-LABEL: @n14_trunc_of_lshr(
; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
; CHECK-NEXT:    [[T0_WIDE:%.*]] = zext i32 [[T0]] to i64
; CHECK-NEXT:    [[T1:%.*]] = lshr i64 [[X:%.*]], [[T0_WIDE]]
; CHECK-NEXT:    [[T1_TRUNC:%.*]] = trunc i64 [[T1]] to i32
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[Y:%.*]], [[T2]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T3]], [[T1_TRUNC]]
; CHECK-NEXT:    [[T5:%.*]] = icmp ne i32 [[T4]], 0
; CHECK-NEXT:    ret i1 [[T5]]
;
  %t0 = sub i32 32, %len
  %t0_wide = zext i32 %t0 to i64
  %t1 = lshr i64 %x, %t0_wide
  %t1_trunc = trunc i64 %t1 to i32
  %t2 = add i32 %len, -1
  %t3 = shl i32 %y, %t2
  %t4 = and i32 %t1_trunc, %t3
  %t5 = icmp ne i32 %t4, 0
  ret i1 %t5
}

; Completely variable shift amounts

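; With fully variable, unrelated shift amounts there is no known combined
; amount to reassociate to, so only the operands of the 'and' get
; canonicalized (the trunc moves to the RHS).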
define i1 @n15_variable_shamts(i32 %x, i64 %y, i32 %shamt0, i64 %shamt1) {
; CHECK-LABEL: @n15_variable_shamts(
; CHECK-NEXT:    [[T0:%.*]] = lshr i32 [[X:%.*]], [[SHAMT0:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[Y:%.*]], [[SHAMT1:%.*]]
; CHECK-NEXT:    [[T1_TRUNC:%.*]] = trunc i64 [[T1]] to i32
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[T1_TRUNC]]
; CHECK-NEXT:    [[T3:%.*]] = icmp ne i32 [[T2]], 0
; CHECK-NEXT:    ret i1 [[T3]]
;
  %t0 = lshr i32 %x, %shamt0
  %t1 = shl i64 %y, %shamt1
  %t1_trunc = trunc i64 %t1 to i32
  %t2 = and i32 %t1_trunc, %t0
  %t3 = icmp ne i32 %t2, 0
  ret i1 %t3
}