; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; (A&B)^(A&C) -> A&(B^C) etc

define <4 x i32> @test_v4i32_xor_repeated_and_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: @test_v4i32_xor_repeated_and_0(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = and <4 x i32> %a, %b
  %2 = and <4 x i32> %a, %c
  %3 = xor <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @test_v4i32_xor_repeated_and_1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: @test_v4i32_xor_repeated_and_1(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = and <4 x i32> %a, %b
  %2 = and <4 x i32> %c, %a
  %3 = xor <4 x i32> %1, %2
  ret <4 x i32> %3
}

; xor(bswap(a), c) to bswap(xor(a, bswap(c)))

declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)

define <4 x i32> @test_v4i32_xor_bswap_splatconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_bswap_splatconst(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[A0:%.*]], <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a0)
  %2 = xor  <4 x i32> %1, <i32 255, i32 255, i32 255, i32 255>
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_xor_bswap_const(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_bswap_const(
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[A0:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 0, i32 -16777216, i32 2, i32 3>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a0)
  %2 = xor  <4 x i32> %1, <i32 0, i32 -16777216, i32 2, i32 3>
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_xor_bswap_const_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_bswap_const_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[A0:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 undef, i32 0, i32 2, i32 3>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a0)
  %2 = xor  <4 x i32> %1, <i32 undef, i32 0, i32 2, i32 3>
  ret <4 x i32> %2
}

; DeMorgan's Law: ~(~X & Y) --> (X | ~Y)

define <4 x i32> @test_v4i32_demorgan_and(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_v4i32_demorgan_and(
; CHECK-NEXT:    [[Y_NOT:%.*]] = xor <4 x i32> [[Y:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP1:%.*]] = or <4 x i32> [[Y_NOT]], [[X:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  %2 = and <4 x i32> %1, %y
  %3 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %2
  ret <4 x i32> %3
}

; DeMorgan's Law: ~(~X | Y) --> (X & ~Y)

define <4 x i32> @test_v4i32_demorgan_or(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_v4i32_demorgan_or(
; CHECK-NEXT:    [[Y_NOT:%.*]] = xor <4 x i32> [[Y:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP1:%.*]] = and <4 x i32> [[Y_NOT]], [[X:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  %2 = or  <4 x i32> %1, %y
  %3 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %2
  ret <4 x i32> %3
}

; ~(~X >>s Y) --> (X >>s Y)

define <4 x i32> @test_v4i32_not_ashr_not(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_v4i32_not_ashr_not(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  %2 = ashr <4 x i32> %1, %y
  %3 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %2
  ret <4 x i32> %3
}

define <4 x i32> @test_v4i32_not_ashr_not_undef(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_v4i32_not_ashr_not_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 undef>, %x
  %2 = ashr <4 x i32> %1, %y
  %3 = xor  <4 x i32> <i32 -1, i32 -1, i32 undef, i32 -1>, %2
  ret <4 x i32> %3
}

; ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)

define <4 x i32> @test_v4i32_not_ashr_negative_splatconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_ashr_negative_splatconst(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> <i32 2, i32 2, i32 2, i32 2>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = ashr <4 x i32> <i32 -3, i32 -3, i32 -3, i32 -3>, %a0
  %2 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_not_ashr_negative_const(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_ashr_negative_const(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> <i32 2, i32 4, i32 6, i32 8>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = ashr <4 x i32> <i32 -3, i32 -5, i32 -7, i32 -9>, %a0
  %2 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_not_ashr_negative_const_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_ashr_negative_const_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> <i32 2, i32 4, i32 0, i32 8>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = ashr <4 x i32> <i32 -3, i32 -5, i32 undef, i32 -9>, %a0
  %2 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 undef>, %1
  ret <4 x i32> %2
}

; ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)

define <4 x i32> @test_v4i32_not_lshr_nonnegative_splatconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_lshr_nonnegative_splatconst(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> <i32 -4, i32 -4, i32 -4, i32 -4>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = lshr <4 x i32> <i32  3, i32  3, i32  3, i32  3>, %a0
  %2 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_not_lshr_nonnegative_const(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_lshr_nonnegative_const(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> <i32 -4, i32 -6, i32 -8, i32 -10>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = lshr <4 x i32> <i32  3, i32  5, i32  7, i32  9>, %a0
  %2 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_not_lshr_nonnegative_const_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_lshr_nonnegative_const_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> <i32 -4, i32 -6, i32 -1, i32 -10>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = lshr <4 x i32> <i32  3, i32  5, i32 undef, i32  9>, %a0
  %2 = xor  <4 x i32> <i32 -1, i32 -1, i32 -1, i32 undef>, %1
  ret <4 x i32> %2
}

; ~(C-X) == X-C-1 == X+(-C-1)

define <4 x i32> @test_v4i32_not_sub_splatconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_sub_splatconst(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], <i32 -4, i32 -4, i32 -4, i32 -4>
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = sub <4 x i32> <i32  3, i32  3, i32  3, i32  3>, %a0
  %2 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_not_sub_const(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_sub_const(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], <i32 -4, i32 -6, i32 0, i32 -16>
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = sub <4 x i32> <i32  3, i32  5, i32 -1, i32 15>, %a0
  %2 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_not_sub_const_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_not_sub_const_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], <i32 -4, i32 undef, i32 0, i32 -16>
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = sub <4 x i32> <i32  3, i32 undef, i32 -1, i32 15>, %a0
  %2 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 undef>, %1
  ret <4 x i32> %2
}

; (C - X) ^ signmask -> (C + signmask - X)

define <4 x i32> @test_v4i32_xor_signmask_sub_splatconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_signmask_sub_splatconst(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <4 x i32> <i32 -2147483645, i32 -2147483645, i32 -2147483645, i32 -2147483645>, [[A0:%.*]]
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = sub <4 x i32> <i32  3, i32  3, i32  3, i32  3>, %a0
  %2 = xor <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_xor_signmask_sub_const(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_signmask_sub_const(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <4 x i32> <i32 3, i32 5, i32 -1, i32 15>, [[A0:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = sub <4 x i32> <i32  3, i32 5, i32 -1, i32 15>, %a0
  %2 = xor <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_xor_signmask_sub_const_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_signmask_sub_const_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <4 x i32> <i32 3, i32 undef, i32 -1, i32 15>, [[A0:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 undef>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = sub <4 x i32> <i32  3, i32 undef, i32 -1, i32 15>, %a0
  %2 = xor <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 undef>, %1
  ret <4 x i32> %2
}

; (X + C) ^ signmask -> (X + C + signmask)

define <4 x i32> @test_v4i32_xor_signmask_add_splatconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_signmask_add_splatconst(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], <i32 -2147483645, i32 -2147483645, i32 -2147483645, i32 -2147483645>
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = add <4 x i32> <i32  3, i32  3, i32  3, i32  3>, %a0
  %2 = xor <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_xor_signmask_add_const(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_signmask_add_const(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], <i32 3, i32 5, i32 -1, i32 15>
; CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = add <4 x i32> <i32  3, i32 5, i32 -1, i32 15>, %a0
  %2 = xor <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_xor_signmask_add_const_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_xor_signmask_add_const_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], <i32 3, i32 undef, i32 -1, i32 15>
; CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 undef>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = add <4 x i32> <i32  3, i32 undef, i32 -1, i32 15>, %a0
  %2 = xor <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 undef>, %1
  ret <4 x i32> %2
}
