; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s

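; add x, zext (setcc) => addcarry x, 0, setcc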
; GCN-LABEL: {{^}}add1:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask

; GFX9-LABEL: {{^}}add1:
; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @add1(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

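; Same combine as add1, with the result truncated to i16.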
; GCN-LABEL: {{^}}add1_i16:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask

; GFX9-LABEL: {{^}}add1_i16:
; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define i16 @add1_i16(i32 addrspace(1)* nocapture %arg, i16 addrspace(1)* nocapture %dst) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  %trunc = trunc i32 %add to i16
  ret i16 %trunc
}

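; add x, sext (setcc) => subcarry x, 0, setcc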
; GCN-LABEL: {{^}}sub1:
; GCN: v_cmp_gt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subbrev_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc
; GCN-NOT: v_cndmask

; GFX9-LABEL: {{^}}sub1:
; GFX9: v_subbrev_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @sub1(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

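; add (add x, zext (setcc)), y => addcarry x, y, setcc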
; GCN-LABEL: {{^}}add_adde:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_add

; GFX9-LABEL: {{^}}add_adde:
; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @add_adde(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %adde = add i32 %v, %ext
  %add2 = add i32 %adde, %a
  store i32 %add2, i32 addrspace(1)* %gep, align 4
  ret void
}

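; add (add x, y), zext (setcc) => addcarry x, y, setcc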
; GCN-LABEL: {{^}}adde_add:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_add

; GFX9-LABEL: {{^}}adde_add:
; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @adde_add(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %a
  %adde = add i32 %add, %ext
  store i32 %adde, i32 addrspace(1)* %gep, align 4
  ret void
}

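; sub (add x, sext (setcc)), y => subcarry x, y, setcc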
; GCN-LABEL: {{^}}sub_sube:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_sub

; GFX9-LABEL: {{^}}sub_sube:
; GFX9: v_subb_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @sub_sube(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %adde = add i32 %v, %ext
  %sub = sub i32 %adde, %a
  store i32 %sub, i32 addrspace(1)* %gep, align 4
  ret void
}

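; As sub_sube, but with the result subtracted from a constant; the carry
; combine does not apply here and the i1 is materialized with v_cndmask.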
; GCN-LABEL: {{^}}sub_sube_commuted:
; GCN-DAG: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN-DAG: buffer_load_dword [[V:v[0-9]+]],
; GCN: v_cndmask_b32_e64 [[CCZEXT:v[0-9]+]], 0, 1, [[CC]]
; GCN: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, [[CCZEXT]], v4
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, {{.*}}, [[SUB]]
; GCN: v_add_i32_e32 {{.*}}, 0x64, [[ADD]]

; GFX9-LABEL: {{^}}sub_sube_commuted:
; GFX9-DAG: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: global_load_dword [[V:v[0-9]+]],
; GFX9-DAG: v_cndmask_b32_e64 [[CCZEXT:v[0-9]+]], 0, 1, [[CC]]
; GFX9: v_sub_u32_e32 {{.*}}, [[CCZEXT]]
; GFX9: v_add_u32_e32
; GFX9: v_add_u32_e32 {{.*}}, 0x64,
define amdgpu_kernel void @sub_sube_commuted(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %adde = add i32 %v, %ext
  %sub = sub i32 %adde, %a
  %sub2 = sub i32 100, %sub
  store i32 %sub2, i32 addrspace(1)* %gep, align 4
  ret void
}

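; add (sub x, y), sext (setcc) => subcarry x, y, setcc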
; GCN-LABEL: {{^}}sube_sub:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_sub

; GFX9-LABEL: {{^}}sube_sub:
; GFX9: v_subb_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @sube_sub(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %sub = sub i32 %v, %a
  %adde = add i32 %sub, %ext
  store i32 %adde, i32 addrspace(1)* %gep, align 4
  ret void
}

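; The carry-in can also come from v_cmp_class rather than an integer compare.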
; GCN-LABEL: {{^}}zext_flclass:
; GCN: v_cmp_class_f32_e{{32|64}} [[CC:[^,]+]],
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask

; GFX9-LABEL: {{^}}zext_flclass:
; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @zext_flclass(i32 addrspace(1)* nocapture %arg, float %x) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %id
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = tail call zeroext i1 @llvm.amdgcn.class.f32(float %x, i32 608)
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

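; add x, sext (class) => subcarry x, 0, class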
; GCN-LABEL: {{^}}sext_flclass:
; GCN: v_cmp_class_f32_e32 vcc,
; GCN: v_subbrev_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc
; GCN-NOT: v_cndmask

; GFX9-LABEL: {{^}}sext_flclass:
; GFX9: v_subbrev_co_u32_e32 v{{[0-9]+}}, vcc
define amdgpu_kernel void @sext_flclass(i32 addrspace(1)* nocapture %arg, float %x) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %id
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = tail call zeroext i1 @llvm.amdgcn.class.f32(float %x, i32 608)
  %ext = sext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

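; The condition may be the and of two compares; the s_and_b64 result feeds
; the addcarry directly.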
; GCN-LABEL: {{^}}add_and:
; GCN: s_and_b64 [[CC:[^,]+]],
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask

; GFX9-LABEL: {{^}}add_and:
; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
define amdgpu_kernel void @add_and(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp1 = icmp ugt i32 %x, %y
  %cmp2 = icmp ugt i32 %x, 1
  %cmp = and i1 %cmp1, %cmp2
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; sub x, sext (setcc) => addcarry x, 0, setcc
; GCN-LABEL: {{^}}cmp_sub_sext:
; GCN: v_cmp_gt_u32_e32 vcc, v
; GCN-NOT: vcc
; GCN: v_addc_u32_e32 [[RESULT:v[0-9]+]], vcc, 0, v{{[0-9]+}}, vcc
define amdgpu_kernel void @cmp_sub_sext(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %add = sub i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; sub x, zext (setcc) => subcarry x, 0, setcc
; GCN-LABEL: {{^}}cmp_sub_zext:
; GCN: v_cmp_gt_u32_e32 vcc, v
; GCN-NOT: vcc
; GCN: v_subbrev_u32_e32 [[RESULT:v[0-9]+]], vcc, 0, v{{[0-9]+}}, vcc
define amdgpu_kernel void @cmp_sub_zext(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %add = sub i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

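; The zext combine still fires when the addcarry result feeds a later sub.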
; GCN-LABEL: {{^}}sub_addcarry:
; GCN: v_cmp_gt_u32_e32 vcc, v
; GCN-NOT: vcc
; GCN: v_addc_u32_e32 [[ADDC:v[0-9]+]], vcc, 0, v{{[0-9]+}}, vcc
; GCN-NOT: vcc
; GCN: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc,
define amdgpu_kernel void @sub_addcarry(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %adde = add i32 %v, %ext
  %add2 = sub i32 %adde, %a
  store i32 %add2, i32 addrspace(1)* %gep, align 4
  ret void
}

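; sub (sub x, zext (setcc)), y => subcarry x, y, setcc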
; GCN-LABEL: {{^}}sub_subcarry:
; GCN: v_cmp_gt_u32_e32 vcc, v
; GCN-NOT: vcc
; GCN: v_subb_u32_e32 [[RESULT:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc
define amdgpu_kernel void @sub_subcarry(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %adde = sub i32 %v, %ext
  %add2 = sub i32 %adde, %a
  store i32 %add2, i32 addrspace(1)* %gep, align 4
  ret void
}

; Check the case where the sub is commuted with the zext; the carry combine
; does not fire and the i1 is expanded with v_cndmask.
; GCN-LABEL: {{^}}sub_zext_setcc_commute:
; GCN: v_cmp_gt_u32_e32 vcc, v
; GCN: v_cndmask
; GCN: v_sub_i32_e32
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc,
; GCN: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc, s{{[0-9]+}}, [[ADD]]
define amdgpu_kernel void @sub_zext_setcc_commute(i32 addrspace(1)* nocapture %arg, i32 %a, i32 %b) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %adde = sub i32 %v, %ext
  %sub = sub i32 %a, %adde
  %sub2 = sub i32 %sub, %b
  store i32 %sub2, i32 addrspace(1)* %gep, align 4
  ret void
}

; Check the case where the sub is commuted with the sext; again the combine
; does not fire and the i1 is expanded with v_cndmask.
; GCN-LABEL: {{^}}sub_sext_setcc_commute:
; GCN: v_cmp_gt_u32_e32 vcc, v
; GCN: v_cndmask
; GCN: v_sub_i32_e32
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc,
; GCN: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc, s{{[0-9]+}}, [[ADD]]
define amdgpu_kernel void @sub_sext_setcc_commute(i32 addrspace(1)* nocapture %arg, i32 %a, i32 %b) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %adde = sub i32 %v, %ext
  %sub = sub i32 %a, %adde
  %sub2 = sub i32 %sub, %b
  store i32 %sub2, i32 addrspace(1)* %gep, align 4
  ret void
}

declare i1 @llvm.amdgcn.class.f32(float, i32) #0

declare i32 @llvm.amdgcn.workitem.id.x() #0

declare i32 @llvm.amdgcn.workitem.id.y() #0

attributes #0 = { nounwind readnone speculatable }