; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

declare i32 @llvm.amdgcn.workitem.id.x() #0

@lds.obj = addrspace(3) global [256 x i32] undef, align 4

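; The store address is a negated workitem index plus a constant element offset;
; the constant part (element 3 = 12 bytes) should fold into the ds_write_b32
; offset field instead of being added to the address.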
define amdgpu_kernel void @write_ds_sub0_offset0_global() #0 {
; CI-LABEL: write_ds_sub0_offset0_global:
; CI:       ; %bb.0: ; %entry
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
; CI-NEXT:    v_mov_b32_e32 v1, 0x7b
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write_b32 v0, v1 offset:12
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: write_ds_sub0_offset0_global:
; GFX9:       ; %bb.0: ; %entry
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
; GFX9-NEXT:    ds_write_b32 v0, v1 offset:12
; GFX9-NEXT:    s_endpgm
entry:
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #1
  %sub1 = sub i32 0, %x.i
  %tmp0 = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds.obj, i32 0, i32 %sub1
  %arrayidx = getelementptr inbounds i32, i32 addrspace(3)* %tmp0, i32 3
  store i32 123, i32 addrspace(3)* %arrayidx
  ret void
}

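; Like write_ds_sub0_offset0_global, but with a llvm.amdgcn.div.fmas.f32 call,
; which reads vcc; the 12-byte offset should still fold into the ds_write_b32
; while vcc is live.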
define amdgpu_kernel void @write_ds_sub0_offset0_global_clamp_bit(float %dummy.val) #0 {
; CI-LABEL: write_ds_sub0_offset0_global_clamp_bit:
; CI:       ; %bb.0: ; %entry
; CI-NEXT:    s_load_dword s0, s[0:1], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
; CI-NEXT:    s_mov_b64 vcc, 0
; CI-NEXT:    v_mov_b32_e32 v2, 0x7b
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    v_mov_b32_e32 v1, s0
; CI-NEXT:    s_mov_b32 s0, 0
; CI-NEXT:    v_div_fmas_f32 v1, v1, v1, v1
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_mov_b32 s3, 0xf000
; CI-NEXT:    s_mov_b32 s2, -1
; CI-NEXT:    s_mov_b32 s1, s0
; CI-NEXT:    ds_write_b32 v0, v2 offset:12
; CI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: write_ds_sub0_offset0_global_clamp_bit:
; GFX9:       ; %bb.0: ; %entry
; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x24
; GFX9-NEXT:    s_mov_b64 vcc, 0
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    v_div_fmas_f32 v2, v1, v1, v1
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
; GFX9-NEXT:    ds_write_b32 v0, v1 offset:12
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    global_store_dword v[0:1], v2, off
; GFX9-NEXT:    s_endpgm
entry:
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #1
  %sub1 = sub i32 0, %x.i
  %tmp0 = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds.obj, i32 0, i32 %sub1
  %arrayidx = getelementptr inbounds i32, i32 addrspace(3)* %tmp0, i32 3
  store i32 123, i32 addrspace(3)* %arrayidx
  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %dummy.val, float %dummy.val, float %dummy.val, i1 false)
  store volatile float %fmas, float addrspace(1)* null
  ret void
}

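; 65535 is the largest value that fits in the 16-bit DS instruction offset, so
; it should fold into the ds_write_b8 and only the negated index remains in the
; address.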
define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset() #1 {
; CI-LABEL: add_x_shl_neg_to_sub_max_offset:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
; CI-NEXT:    v_mov_b32_e32 v1, 13
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write_b8 v0, v1 offset:65535
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_max_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 13
; GFX9-NEXT:    ds_write_b8 v0, v1 offset:65535
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add = add i32 65535, %shl
  %ptr = inttoptr i32 %add to i8 addrspace(3)*
  store i8 13, i8 addrspace(3)* %ptr
  ret void
}

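; 65536 no longer fits in the 16-bit DS instruction offset, so the constant is
; kept in the address computation (v_sub with 0x10000) and the store uses
; offset 0.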
define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
; CI-LABEL: add_x_shl_neg_to_sub_max_offset_p1:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x10000, v0
; CI-NEXT:    v_mov_b32_e32 v1, 13
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write_b8 v0, v1
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_max_offset_p1:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0x10000, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 13
; GFX9-NEXT:    ds_write_b8 v0, v1
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add = add i32 65536, %shl
  %ptr = inttoptr i32 %add to i8 addrspace(3)*
  store i8 13, i8 addrspace(3)* %ptr
  ret void
}

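; Two stores at different constant offsets from the same negated base: the base
; should be computed once, with both constants folded as instruction offsets.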
define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use() #1 {
; CI-LABEL: add_x_shl_neg_to_sub_multi_use:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
; CI-NEXT:    v_mov_b32_e32 v1, 13
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write_b32 v0, v1 offset:123
; CI-NEXT:    ds_write_b32 v0, v1 offset:456
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_multi_use:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 13
; GFX9-NEXT:    ds_write_b32 v0, v1 offset:123
; GFX9-NEXT:    ds_write_b32 v0, v1 offset:456
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add0 = add i32 123, %shl
  %add1 = add i32 456, %shl
  %ptr0 = inttoptr i32 %add0 to i32 addrspace(3)*
  store volatile i32 13, i32 addrspace(3)* %ptr0
  %ptr1 = inttoptr i32 %add1 to i32 addrspace(3)*
  store volatile i32 13, i32 addrspace(3)* %ptr1
  ret void
}

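; Two volatile stores to the same address reuse the same base register and
; instruction offset.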
define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
; CI-LABEL: add_x_shl_neg_to_sub_multi_use_same_offset:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
; CI-NEXT:    v_mov_b32_e32 v1, 13
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write_b32 v0, v1 offset:123
; CI-NEXT:    ds_write_b32 v0, v1 offset:123
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_multi_use_same_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 13
; GFX9-NEXT:    ds_write_b32 v0, v1 offset:123
; GFX9-NEXT:    ds_write_b32 v0, v1 offset:123
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add = add i32 123, %shl
  %ptr = inttoptr i32 %add to i32 addrspace(3)*
  store volatile i32 13, i32 addrspace(3)* %ptr
  store volatile i32 13, i32 addrspace(3)* %ptr
  ret void
}

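; The underaligned i64 store is split into a ds_write2_b32. 1019 (0x3fb) is not
; a multiple of 4, so it cannot be encoded in the dword-scaled write2 offsets
; and stays in the v_sub.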
define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
; CI-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x3fb, v0
; CI-NEXT:    v_mov_b32_e32 v1, 0x7b
; CI-NEXT:    v_mov_b32_e32 v2, 0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0x3fb, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add = add i32 1019, %shl
  %ptr = inttoptr i32 %add to i64 addrspace(3)*
  store i64 123, i64 addrspace(3)* %ptr, align 4
  ret void
}

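; Same as the previous test, plus a llvm.amdgcn.div.fmas.f32 call that reads
; vcc, to check the address handling does not clobber the live vcc value.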
define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit(float %dummy.val) #1 {
; CI-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dword s0, s[0:1], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x3fb, v0
; CI-NEXT:    s_mov_b64 vcc, 0
; CI-NEXT:    v_mov_b32_e32 v2, 0x7b
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    v_mov_b32_e32 v1, s0
; CI-NEXT:    s_mov_b32 s0, 0
; CI-NEXT:    v_div_fmas_f32 v1, v1, v1, v1
; CI-NEXT:    v_mov_b32_e32 v3, 0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_mov_b32 s3, 0xf000
; CI-NEXT:    s_mov_b32 s2, -1
; CI-NEXT:    s_mov_b32 s1, s0
; CI-NEXT:    ds_write2_b32 v0, v2, v3 offset1:1
; CI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x24
; GFX9-NEXT:    s_mov_b64 vcc, 0
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0x3fb, v0
; GFX9-NEXT:    v_mov_b32_e32 v3, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    v_div_fmas_f32 v2, v1, v1, v1
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
; GFX9-NEXT:    ds_write2_b32 v0, v1, v3 offset1:1
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    global_store_dword v[0:1], v2, off
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add = add i32 1019, %shl
  %ptr = inttoptr i32 %add to i64 addrspace(3)*
  store i64 123, i64 addrspace(3)* %ptr, align 4
  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %dummy.val, float %dummy.val, float %dummy.val, i1 false)
  store volatile float %fmas, float addrspace(1)* null
  ret void
}

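; 1020 is dword-aligned, but the second half would need offset1 = 256, which
; does not fit in the 8-bit write2 offset field, so the whole constant stays in
; the v_sub.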
define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1() #1 {
; CI-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x3fc, v0
; CI-NEXT:    v_mov_b32_e32 v1, 0x7b
; CI-NEXT:    v_mov_b32_e32 v2, 0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
; CI-NEXT:    s_endpgm
;
; GFX9-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT:    v_sub_u32_e32 v0, 0x3fc, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
; GFX9-NEXT:    s_endpgm
  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
  %neg = sub i32 0, %x.i
  %shl = shl i32 %neg, 2
  %add = add i32 1020, %shl
  %ptr = inttoptr i32 %add to i64 addrspace(3)*
  store i64 123, i64 addrspace(3)* %ptr, align 4
  ret void
}

declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
attributes #2 = { nounwind convergent }