; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=CI -check-prefix=CI-NOHSA %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=GCN-HSA %s

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.workitem.id.y() #0

; In this test both the pointer and the offset operands to the
; BUFFER_LOAD instructions end up being stored in VGPRs.  This
; requires us to add the pointer and offset together, store the
; result in the offset operand (vaddr), and then store 0 in an
; SGPR register pair and use that for the pointer operand
; (low 64-bits of srsrc).
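;
; Illustrative sketch of the expected addressing (not checked output):
; an addr64 MUBUF access computes srsrc.base + vaddr (+ soffset + imm
; offset), so with
;   v[n:n+1] = ptr + offset   ; 64-bit add on the VALU
;   s[m:m+1] = 0              ; low 64 bits of srsrc
; the loads below take their whole address from vaddr.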

; GCN-LABEL: {{^}}mubuf:

; Make sure we aren't using VGPRs for the source operand of s_mov_b64
; GCN-NOT: s_mov_b64 s[{{[0-9]+:[0-9]+}}], v

; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
; instructions
; GCN-NOHSA: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN-NOHSA: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]

define amdgpu_kernel void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = call i32 @llvm.amdgcn.workitem.id.y()
  %tmp2 = sext i32 %tmp to i64
  %tmp3 = sext i32 %tmp1 to i64
  br label %loop

loop:                                             ; preds = %loop, %entry
  %tmp4 = phi i64 [ 0, %entry ], [ %tmp5, %loop ]
  %tmp5 = add i64 %tmp2, %tmp4
  %tmp6 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp5
  %tmp7 = load i8, i8 addrspace(1)* %tmp6, align 1
  %tmp8 = or i64 %tmp5, 1
  %tmp9 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp8
  %tmp10 = load i8, i8 addrspace(1)* %tmp9, align 1
  %tmp11 = add i8 %tmp7, %tmp10
  %tmp12 = sext i8 %tmp11 to i32
  store i32 %tmp12, i32 addrspace(1)* %out
  %tmp13 = icmp slt i64 %tmp5, 10
  br i1 %tmp13, label %loop, label %done

done:                                             ; preds = %loop
  ret void
}

; Test moving an SMRD instruction to the VALU
; FIXME: movs can be moved before nop to reduce count
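; The load is at dword index 3000: SI materializes the byte offset
; 3000 * 4 = 12000 = 0x2ee0 in an SGPR, while CI encodes the dword
; offset 3000 = 0xbb8 directly as a 32-bit SMRD literal.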

; GCN-LABEL: {{^}}smrd_valu:
; SI: s_movk_i32 [[OFFSET:s[0-9]+]], 0x2ee0
; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
; SI: s_mov_b32
; SI: s_nop 1
; SI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, [[OFFSET]]

; CI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xbb8
; GCN: v_mov_b32_e32 [[V_OUT:v[0-9]+]], [[OUT]]
; GCN-NOHSA: buffer_store_dword [[V_OUT]]
; GCN-HSA: flat_store_dword {{.*}}, [[V_OUT]]
define amdgpu_kernel void @smrd_valu(i32 addrspace(4)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
entry:
  %tmp = icmp ne i32 %a, 0
  br i1 %tmp, label %if, label %else

if:                                               ; preds = %entry
  %tmp1 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(1)* %in
  br label %endif

else:                                             ; preds = %entry
  %tmp2 = getelementptr i32 addrspace(4)*, i32 addrspace(4)* addrspace(1)* %in
  %tmp3 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(1)* %tmp2
  br label %endif

endif:                                            ; preds = %else, %if
  %tmp4 = phi i32 addrspace(4)* [ %tmp1, %if ], [ %tmp3, %else ]
  %tmp5 = getelementptr i32, i32 addrspace(4)* %tmp4, i32 3000
  %tmp6 = load i32, i32 addrspace(4)* %tmp5
  store i32 %tmp6, i32 addrspace(1)* %out
  ret void
}

; Test moving an SMRD with an immediate offset to the VALU
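; The constant part of the address (element 4 of an [8 x i32] row,
; i.e. 16 bytes) folds into the MUBUF immediate as offset:16.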

; GCN-LABEL: {{^}}smrd_valu2:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
define amdgpu_kernel void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(4)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(4)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; Use a big offset that will use the SMRD literal offset on CI
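; Here the offset is 5000 dwords * 4 = 20000 bytes = 0x4e20; once the
; load is moved to the VALU, the byte offset lands in the MUBUF soffset
; register instead (see [[OFFSET]] below).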
; GCN-LABEL: {{^}}smrd_valu_ci_offset:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_movk_i32 [[OFFSET:s[0-9]+]], 0x4e20{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN-HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}
define amdgpu_kernel void @smrd_valu_ci_offset(i32 addrspace(1)* %out, i32 addrspace(4)* %in, i32 %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr i32, i32 addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr i32, i32 addrspace(4)* %tmp2, i32 5000
  %tmp4 = load i32, i32 addrspace(4)* %tmp3
  %tmp5 = add i32 %tmp4, %c
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}

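; Same pattern for a 64-bit load: the byte offset is
; 5000 * 8 = 40000 = 0x9c40.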
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x2:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET:s[0-9]+]], 0x9c40{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx2
; GCN-HSA: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
define amdgpu_kernel void @smrd_valu_ci_offset_x2(i64 addrspace(1)* %out, i64 addrspace(4)* %in, i64 %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr i64, i64 addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr i64, i64 addrspace(4)* %tmp2, i32 5000
  %tmp4 = load i64, i64 addrspace(4)* %tmp3
  %tmp5 = or i64 %tmp4, %c
  store i64 %tmp5, i64 addrspace(1)* %out
  ret void
}

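; 128-bit case: the byte offset is 1234 * 16 = 19744 = 0x4d20.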
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x4:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_movk_i32 [[OFFSET:s[0-9]+]], 0x4d20{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
define amdgpu_kernel void @smrd_valu_ci_offset_x4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(4)* %in, <4 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %tmp2, i32 1234
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp3
  %tmp5 = or <4 x i32> %tmp4, %c
  store <4 x i32> %tmp5, <4 x i32> addrspace(1)* %out
  ret void
}

; Original scalar load uses SGPR offset on SI and 32-bit literal on
; CI.
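; The base byte offset is 1234 * 32 = 39488 = 0x9a40; the 256-bit load
; is split into two dwordx4 halves, so the second half sits at
; 0x9a40 + 16 = 0x9a50.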

; GCN-LABEL: {{^}}smrd_valu_ci_offset_x8:
; GCN-NOHSA-DAG: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x9a40{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x9a50{{$}}
; CI-NOHSA-NOT: v_add
; CI-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @smrd_valu_ci_offset_x8(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(4)* %in, <8 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %tmp2, i32 1234
  %tmp4 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp3
  %tmp5 = or <8 x i32> %tmp4, %c
  store <8 x i32> %tmp5, <8 x i32> addrspace(1)* %out
  ret void
}

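; 512-bit case: the base byte offset is 1234 * 64 = 78976 = 0x13480,
; loaded as four dwordx4 pieces at 0x13480, 0x13490, 0x134a0 and
; 0x134b0.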
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x16:

; SI: s_mov_b32 {{s[0-9]+}}, 0x13480
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:32
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], {{s[0-9]+}} addr64
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:48
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x13480{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x13490{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET2:s[0-9]+]], 0x134a0{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET2]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET3:s[0-9]+]], 0x134b0{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET3]] addr64{{$}}

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4

; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4

; GCN: s_endpgm
define amdgpu_kernel void @smrd_valu_ci_offset_x16(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(4)* %in, <16 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr <16 x i32>, <16 x i32> addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr <16 x i32>, <16 x i32> addrspace(4)* %tmp2, i32 1234
  %tmp4 = load <16 x i32>, <16 x i32> addrspace(4)* %tmp3
  %tmp5 = or <16 x i32> %tmp4, %c
  store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu2_salu_user:
; GCN-NOHSA: buffer_load_dword [[MOVED:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; GCN-HSA: flat_load_dword [[MOVED:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, s{{[0-9]+}}, [[MOVED]]
; GCN-NOHSA: buffer_store_dword [[ADD]]
; GCN-HSA: flat_store_dword {{.*}}, [[ADD]]
define amdgpu_kernel void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(4)* %in, i32 %a) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(4)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  %tmp4 = add i32 %tmp3, %a
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

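; Index 255 gives a byte offset of 255 * 4 = 1020, the largest value
; encodable in SI's 8-bit dword SMRD immediate; it also fits the MUBUF
; offset field of the moved load (offset:1020).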
; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
define amdgpu_kernel void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(4)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(4)* %in, i32 %tmp, i32 255
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

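; One dword further: 256 * 4 = 1024 bytes no longer fits the SMRD
; immediate, but still fits the 12-bit MUBUF offset field (offset:1024).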
; GCN-LABEL: {{^}}smrd_valu2_mubuf_offset:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1024{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
define amdgpu_kernel void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(4)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(4)* %in, i32 %tmp, i32 256
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v8i32:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <8 x i32> addrspace(4)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp2, align 4
  store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v8i32_salu_user:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <8 x i32> addrspace(4)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp2, align 4

  %elt0 = extractelement <8 x i32> %tmp3, i32 0
  %elt1 = extractelement <8 x i32> %tmp3, i32 1
  %elt2 = extractelement <8 x i32> %tmp3, i32 2
  %elt3 = extractelement <8 x i32> %tmp3, i32 3
  %elt4 = extractelement <8 x i32> %tmp3, i32 4
  %elt5 = extractelement <8 x i32> %tmp3, i32 5
  %elt6 = extractelement <8 x i32> %tmp3, i32 6
  %elt7 = extractelement <8 x i32> %tmp3, i32 7

  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7

  store i32 %add6, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v16i32:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <16 x i32> addrspace(4)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(4)* %tmp2, align 4
  store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v16i32_salu_user:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <16 x i32> addrspace(4)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(4)* %tmp2, align 4

  %elt0 = extractelement <16 x i32> %tmp3, i32 0
  %elt1 = extractelement <16 x i32> %tmp3, i32 1
  %elt2 = extractelement <16 x i32> %tmp3, i32 2
  %elt3 = extractelement <16 x i32> %tmp3, i32 3
  %elt4 = extractelement <16 x i32> %tmp3, i32 4
  %elt5 = extractelement <16 x i32> %tmp3, i32 5
  %elt6 = extractelement <16 x i32> %tmp3, i32 6
  %elt7 = extractelement <16 x i32> %tmp3, i32 7
  %elt8 = extractelement <16 x i32> %tmp3, i32 8
  %elt9 = extractelement <16 x i32> %tmp3, i32 9
  %elt10 = extractelement <16 x i32> %tmp3, i32 10
  %elt11 = extractelement <16 x i32> %tmp3, i32 11
  %elt12 = extractelement <16 x i32> %tmp3, i32 12
  %elt13 = extractelement <16 x i32> %tmp3, i32 13
  %elt14 = extractelement <16 x i32> %tmp3, i32 14
  %elt15 = extractelement <16 x i32> %tmp3, i32 15

  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7
  %add7 = add i32 %add6, %elt8
  %add8 = add i32 %add7, %elt9
  %add9 = add i32 %add8, %elt10
  %add10 = add i32 %add9, %elt11
  %add11 = add i32 %add10, %elt12
  %add12 = add i32 %add11, %elt13
  %add13 = add i32 %add12, %elt14
  %add14 = add i32 %add13, %elt15

  store i32 %add14, i32 addrspace(1)* %out
  ret void
}

; Make sure we legalize vopc operands after moving an sopc to the VALU.
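; Illustrative legalized sequence (a sketch mirroring the checks below):
;   v_cmp_le_u32_e32 vcc, s_cond, v_val   ; SGPR stays as the first operand
;   s_and_b64 vcc, exec, vcc              ; mask the VOPC result with exec
;   s_cbranch_vccnz ...
; where s_cond/v_val are placeholder registers.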

; GCN-LABEL: {{^}}sopc_vopc_legalize_bug:
; GCN: s_load_dword [[SGPR:s[0-9]+]]
; GCN: v_cmp_le_u32_e32 vcc, [[SGPR]], v{{[0-9]+}}
; GCN: s_and_b64 vcc, exec, vcc
; GCN: s_cbranch_vccnz [[EXIT:[A-Z0-9_]+]]
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN-NOHSA: buffer_store_dword [[ONE]]
; GCN-HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[ONE]]
; GCN: {{^}}[[EXIT]]:
; GCN: s_endpgm
define amdgpu_kernel void @sopc_vopc_legalize_bug(i32 %cond, i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
bb3:
  %tmp0 = bitcast i32 %cond to float
  %tmp1 = fadd float %tmp0, 2.500000e-01
  %tmp2 = bitcast float %tmp1 to i32
  %tmp3 = icmp ult i32 %tmp2, %cond
  br i1 %tmp3, label %bb6, label %bb7

bb6:
  store i32 1, i32 addrspace(1)* %out
  br label %bb7

bb7:                                              ; preds = %bb6, %bb3
  ret void
}

; GCN-LABEL: {{^}}phi_visit_order:
; GCN: v_add_i32_e64 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 1, v{{[0-9]+}}
define amdgpu_kernel void @phi_visit_order() {
bb:
  br label %bb1

bb1:
  %tmp = phi i32 [ 0, %bb ], [ %tmp5, %bb4 ]
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %cnd = icmp eq i32 %tid, 0
  br i1 %cnd, label %bb4, label %bb2

bb2:
  %tmp3 = add nsw i32 %tmp, 1
  br label %bb4

bb4:
  %tmp5 = phi i32 [ %tmp3, %bb2 ], [ %tmp, %bb1 ]
  store volatile i32 %tmp5, i32 addrspace(1)* undef
  br label %bb1
}

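; Both phi inputs are the same immediate, so 0x400 is materialized into
; two SGPRs and the xor in the loop stays on the SALU.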
; GCN-LABEL: {{^}}phi_imm_in_sgprs:
; GCN: s_movk_i32 [[A:s[0-9]+]], 0x400
; GCN: s_movk_i32 [[B:s[0-9]+]], 0x400
; GCN: [[LOOP_LABEL:[0-9a-zA-Z_]+]]:
; GCN: s_xor_b32 [[B]], [[B]], [[A]]
; GCN: s_cbranch_scc{{[01]}} [[LOOP_LABEL]]
define amdgpu_kernel void @phi_imm_in_sgprs(i32 addrspace(3)* %out, i32 %cond) {
entry:
  br label %loop

loop:
  %i = phi i32 [0, %entry], [%i.add, %loop]
  %offset = phi i32 [1024, %entry], [%offset.xor, %loop]
  %offset.xor = xor i32 %offset, 1024
  %offset.i = add i32 %offset.xor, %i
  %ptr = getelementptr i32, i32 addrspace(3)* %out, i32 %offset.i
  store i32 0, i32 addrspace(3)* %ptr
  %i.add = add i32 %i, 1
  %cmp = icmp ult i32 %i.add, %cond
  br i1 %cmp, label %loop, label %exit

exit:
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }