; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX600 %s
; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX700 %s
; RUN: llc -march=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX801 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX900 %s
; RUN: llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN-DL --check-prefix=GFX906 %s

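; A single-use xor(xor(a, b), -1) on uniform (SGPR) operands is expected to
; select to s_xnor_b32.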
; GCN-LABEL: {{^}}scalar_xnor_i32_one_use
; GCN: s_xnor_b32
define amdgpu_kernel void @scalar_xnor_i32_one_use(
    i32 addrspace(1)* %r0, i32 %a, i32 %b) {
entry:
  %xor = xor i32 %a, %b
  %r0.val = xor i32 %xor, -1
  store i32 %r0.val, i32 addrspace(1)* %r0
  ret void
}

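; When the inner xor has a second use, the xnor should not be formed; expect
; separate s_xor_b32 and s_not_b32 instructions instead.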
; GCN-LABEL: {{^}}scalar_xnor_i32_mul_use
; GCN-NOT: s_xnor_b32
; GCN: s_xor_b32
; GCN: s_not_b32
; GCN: s_add_i32
define amdgpu_kernel void @scalar_xnor_i32_mul_use(
    i32 addrspace(1)* %r0, i32 addrspace(1)* %r1, i32 %a, i32 %b) {
entry:
  %xor = xor i32 %a, %b
  %r0.val = xor i32 %xor, -1
  %r1.val = add i32 %xor, %a
  store i32 %r0.val, i32 addrspace(1)* %r0
  store i32 %r1.val, i32 addrspace(1)* %r1
  ret void
}

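; Same single-use pattern on uniform 64-bit operands; expect s_xnor_b64.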
; GCN-LABEL: {{^}}scalar_xnor_i64_one_use
; GCN: s_xnor_b64
define amdgpu_kernel void @scalar_xnor_i64_one_use(
    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
entry:
  %xor = xor i64 %a, %b
  %r0.val = xor i64 %xor, -1
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}

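; With an extra use of the inner 64-bit xor, no s_xnor_b64 should be emitted.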
; GCN-LABEL: {{^}}scalar_xnor_i64_mul_use
; GCN-NOT: s_xnor_b64
; GCN: s_xor_b64
; GCN: s_not_b64
; GCN: s_add_u32
; GCN: s_addc_u32
define amdgpu_kernel void @scalar_xnor_i64_mul_use(
    i64 addrspace(1)* %r0, i64 addrspace(1)* %r1, i64 %a, i64 %b) {
entry:
  %xor = xor i64 %a, %b
  %r0.val = xor i64 %xor, -1
  %r1.val = add i64 %xor, %a
  store i64 %r0.val, i64 addrspace(1)* %r0
  store i64 %r1.val, i64 addrspace(1)* %r1
  ret void
}

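; On divergent (VGPR) operands the scalar xnor cannot be used; expect
; v_not_b32 plus v_xor_b32, or v_xnor_b32 on targets checked with GCN-DL.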
; GCN-LABEL: {{^}}vector_xnor_i32_one_use
; GCN-NOT: s_xnor_b32
; GCN: v_not_b32
; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
define i32 @vector_xnor_i32_one_use(i32 %a, i32 %b) {
entry:
  %xor = xor i32 %a, %b
  %r = xor i32 %xor, -1
  ret i32 %r
}

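; Divergent 64-bit xnor is expected to be done per 32-bit half: two
; v_not_b32/v_xor_b32 pairs, or two v_xnor_b32 on GCN-DL targets.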
; GCN-LABEL: {{^}}vector_xnor_i64_one_use
; GCN-NOT: s_xnor_b64
; GCN: v_not_b32
; GCN: v_not_b32
; GCN: v_xor_b32
; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
; GCN-DL: v_xnor_b32
define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
entry:
  %xor = xor i64 %a, %b
  %r = xor i64 %xor, -1
  ret i64 %r
}

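; Mixed operands (uniform %s, divergent %v): the inversion is expected to be
; folded onto the uniform side as s_not_b32, followed by a v_xor_b32.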
; GCN-LABEL: {{^}}xnor_s_v_i32_one_use
; GCN-NOT: s_xnor_b32
; GCN: s_not_b32
; GCN: v_xor_b32
define amdgpu_kernel void @xnor_s_v_i32_one_use(i32 addrspace(1)* %out, i32 %s) {
  %v = call i32 @llvm.amdgcn.workitem.id.x() #1
  %xor = xor i32 %s, %v
  %d = xor i32 %xor, -1
  store i32 %d, i32 addrspace(1)* %out
  ret void
}

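; Same as the previous test with the xor operands swapped.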
; GCN-LABEL: {{^}}xnor_v_s_i32_one_use
; GCN-NOT: s_xnor_b32
; GCN: s_not_b32
; GCN: v_xor_b32
define amdgpu_kernel void @xnor_v_s_i32_one_use(i32 addrspace(1)* %out, i32 %s) {
  %v = call i32 @llvm.amdgcn.workitem.id.x() #1
  %xor = xor i32 %v, %s
  %d = xor i32 %xor, -1
  store i32 %d, i32 addrspace(1)* %out
  ret void
}

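; 64-bit mixed case (uniform %a, divergent %b): expect s_not_b64 on the
; uniform operand and a v_xor_b32 per half, or v_xnor_b32 per half on GCN-DL
; targets.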
; GCN-LABEL: {{^}}xnor_i64_s_v_one_use
; GCN-NOT: s_xnor_b64
; GCN: s_not_b64
; GCN: v_xor_b32
; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
; GCN-DL: v_xnor_b32
define amdgpu_kernel void @xnor_i64_s_v_one_use(
  i64 addrspace(1)* %r0, i64 %a) {
entry:
  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
  %b64 = zext i32 %b32 to i64
  %b = shl i64 %b64, 29
  %xor = xor i64 %a, %b
  %r0.val = xor i64 %xor, -1
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}

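; Same as the previous test with the xor operands swapped.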
; GCN-LABEL: {{^}}xnor_i64_v_s_one_use
; GCN-NOT: s_xnor_b64
; GCN: s_not_b64
; GCN: v_xor_b32
; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
; GCN-DL: v_xnor_b32
define amdgpu_kernel void @xnor_i64_v_s_one_use(
  i64 addrspace(1)* %r0, i64 %a) {
entry:
  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
  %b64 = zext i32 %b32 to i64
  %b = shl i64 %b64, 29
  %xor = xor i64 %b, %a
  %r0.val = xor i64 %xor, -1
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}

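; xor(not(a), b) on divergent operands; expect v_not_b32 plus v_xor_b32, or
; v_xnor_b32 on GCN-DL targets.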
; GCN-LABEL: {{^}}vector_xor_na_b_i32_one_use
; GCN-NOT: s_xnor_b32
; GCN: v_not_b32
; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
define i32 @vector_xor_na_b_i32_one_use(i32 %a, i32 %b) {
entry:
  %na = xor i32 %a, -1
  %r = xor i32 %na, %b
  ret i32 %r
}

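; xor(a, not(b)) on divergent operands; same expectation as the previous test.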
; GCN-LABEL: {{^}}vector_xor_a_nb_i32_one_use
; GCN-NOT: s_xnor_b32
; GCN: v_not_b32
; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
define i32 @vector_xor_a_nb_i32_one_use(i32 %a, i32 %b) {
entry:
  %nb = xor i32 %b, -1
  %r = xor i32 %a, %nb
  ret i32 %r
}

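; Uniform xor(a, not(b)) is expected to fold to s_xnor_b64.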
; GCN-LABEL: {{^}}scalar_xor_a_nb_i64_one_use
; GCN: s_xnor_b64
define amdgpu_kernel void @scalar_xor_a_nb_i64_one_use(
    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
entry:
  %nb = xor i64 %b, -1
  %r0.val = xor i64 %a, %nb
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}

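; Uniform xor(not(a), b) is expected to fold to s_xnor_b64.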
; GCN-LABEL: {{^}}scalar_xor_na_b_i64_one_use
; GCN: s_xnor_b64
define amdgpu_kernel void @scalar_xor_na_b_i64_one_use(
    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
entry:
  %na = xor i64 %a, -1
  %r0.val = xor i64 %na, %b
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}

; Function Attrs: nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind readnone }