1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
3
; With left shift, the comparison should not be modified.
; The masked value feeds a *signed* compare, so the fold is not valid here;
; the CHECK lines require the shl+and+icmp sequence to survive instcombine.
define i1 @test_shift_and_cmp_not_changed1(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_not_changed1(
; CHECK-NEXT:    [[SHLP:%.*]] = shl i8 %p, 5
; CHECK-NEXT:    [[ANDP:%.*]] = and i8 [[SHLP]], -64
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 32
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
17
; With arithmetic right shift, the comparison should not be modified.
; Same shape as the previous test but with ashr; the signed compare again
; blocks the fold, so the whole sequence must remain.
; (The value is still named %shlp to keep the tests symmetric.)
define i1 @test_shift_and_cmp_not_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_not_changed2(
; CHECK-NEXT:    [[SHLP:%.*]] = ashr i8 %p, 5
; CHECK-NEXT:    [[ANDP:%.*]] = and i8 [[SHLP]], -64
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 32
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = ashr i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
31
; This should simplify functionally to the left shift case.
; The extra input parameter should be optimized away.
; The shl-by-5 discards everything above bit 2 of %or; the only bit
; contributed by %q is bit 3 (%q & 8), so %q is dead and the
; shl+ashr sign-extension pair folds into the shl+and compared form.
define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1(
; CHECK-NEXT:    [[ANDP:%.*]] = shl i8 %p, 5
; CHECK-NEXT:    [[SHL:%.*]] = and i8 [[ANDP]], -64
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[SHL]], 32
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %andp = and i8 %p, 6
  %andq = and i8 %q, 8
  %or = or i8 %andq, %andp
  %shl = shl i8 %or, 5
  %ashr = ashr i8 %shl, 5
  %cmp = icmp slt i8 %ashr, 1
  ret i1 %cmp
}
49
; Splat-vector version of test_shift_and_cmp_changed1: the same fold must
; apply elementwise, with %q eliminated and the shl+ashr pair collapsed.
define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
; CHECK-NEXT:    [[ANDP:%.*]] = shl <2 x i8> [[P:%.*]], <i8 5, i8 5>
; CHECK-NEXT:    [[SHL:%.*]] = and <2 x i8> [[ANDP]], <i8 -64, i8 -64>
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[SHL]], <i8 32, i8 32>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %andp = and <2 x i8> %p, <i8 6, i8 6>
  %andq = and <2 x i8> %q, <i8 8, i8 8>
  %or = or <2 x i8> %andq, %andp
  %shl = shl <2 x i8> %or, <i8 5, i8 5>
  %ashr = ashr <2 x i8> %shl, <i8 5, i8 5>
  %cmp = icmp slt <2 x i8> %ashr, <i8 1, i8 1>
  ret <2 x i1> %cmp
}
65
; Unsigned compare allows a transformation to compare against 0.
; (%p << 5) & -64 keeps only bits 6-7, i.e. a multiple of 64, so
; "ult 32" holds exactly when the value is 0, which is (%p & 6) == 0.
define i1 @test_shift_and_cmp_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2(
; CHECK-NEXT:    [[ANDP:%.*]] = and i8 %p, 6
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ANDP]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp ult i8 %andp, 32
  ret i1 %cmp
}
78
; Splat-vector version of test_shift_and_cmp_changed2: the unsigned
; compare folds elementwise to an equality test against zero.
define <2 x i1> @test_shift_and_cmp_changed2_vec(<2 x i8> %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2_vec(
; CHECK-NEXT:    [[ANDP:%.*]] = and <2 x i8> %p, <i8 6, i8 6>
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ANDP]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %shlp = shl <2 x i8> %p, <i8 5, i8 5>
  %andp = and <2 x i8> %shlp, <i8 -64, i8 -64>
  %cmp = icmp ult <2 x i8> %andp, <i8 32, i8 32>
  ret <2 x i1> %cmp
}
90
; nsw on the shift should not affect the comparison.
; Identical to test_shift_and_cmp_not_changed1 except for the nsw flag;
; the CHECK lines require the sequence (including nsw) to remain as-is.
define i1 @test_shift_and_cmp_changed3(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed3(
; CHECK-NEXT:    [[SHLP:%.*]] = shl nsw i8 %p, 5
; CHECK-NEXT:    [[ANDP:%.*]] = and i8 [[SHLP]], -64
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 32
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl nsw i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
104
; Logical shift right allows a return true because the 'and' guarantees no bits are set.
; lshr by 5 leaves a value in [0, 7]; masking with -64 (bits 6-7) therefore
; yields 0, and "0 slt 32" is always true, so the whole function folds to true.
define i1 @test_shift_and_cmp_changed4(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed4(
; CHECK-NEXT:    ret i1 true
;
  %shlp = lshr i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
115
116