; RUN: llc -march=amdgcn -mcpu=tahiti  -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefix=SI   -check-prefix=GCN -check-prefix=SICIVI -check-prefix=SICI -check-prefix=SIVIGFX9 %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefix=CI   -check-prefix=GCN -check-prefix=SICIVI -check-prefix=SICI %s
; RUN: llc -march=amdgcn -mcpu=tonga   -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefix=VI   -check-prefix=GCN -check-prefix=SICIVI -check-prefix=VIGFX9 -check-prefix=SIVIGFX9 %s
; RUN: llc -march=amdgcn -mcpu=gfx900  -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN -check-prefix=VIGFX9 -check-prefix=SIVIGFX9 %s
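
; Summary of the SMRD offset encodings these tests exercise (as implied by
; the check lines below): SI takes an 8-bit immediate offset in dwords, CI
; widens that to a 32-bit literal in dwords, and VI/GFX9 take a 20-bit
; immediate in bytes. Offsets that do not fit the immediate field are
; materialized in an SGPR, which is interpreted as a byte offset on all
; generations.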

; SMRD load with an immediate offset.
; GCN-LABEL: {{^}}smrd0:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
; VIGFX9: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
define amdgpu_kernel void @smrd0(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 1
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
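
; Worked offset for smrd0 (from the checks above): GEP index 1 is 1 dword, so
; SI/CI encode the immediate as 0x1 dwords while VI/GFX9 encode 1 * 4 = 0x4
; bytes.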

; SMRD load with the largest possible immediate offset.
; GCN-LABEL: {{^}}smrd1:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff,0x{{[0-9]+[137]}}
; VIGFX9: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define amdgpu_kernel void @smrd1(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 255
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
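
; Worked offset for smrd1: index 255 is the largest value that fits SI's
; 8-bit dword immediate (0xff); VI/GFX9 encode 255 * 4 = 0x3fc bytes.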

; SMRD load with an offset greater than the largest possible immediate.
; GCN-LABEL: {{^}}smrd2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VIGFX9: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
; GCN: s_endpgm
define amdgpu_kernel void @smrd2(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 256
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
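
; Worked offset for smrd2: index 256 no longer fits SI's 8-bit dword
; immediate, so SI materializes the byte offset 256 * 4 = 0x400 in an SGPR.
; CI can still use its 32-bit literal dword offset (0x100), and the byte
; offset 0x400 fits VI/GFX9's 20-bit immediate.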

; SMRD load with a 64-bit offset.
; GCN-LABEL: {{^}}smrd3:
; FIXME: There are too many copies here because we don't fold immediates
;        through REG_SEQUENCE
; SI: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0x13 ; encoding: [0x13
; TODO: Add VI checks
; GCN: s_endpgm
define amdgpu_kernel void @smrd3(i32 addrspace(1)* %out, [8 x i32], i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 4294967296
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
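
; Note on smrd3: the checked s_load_dwordx2 with immediate 0x13 appears to be
; the kernarg load of %ptr itself (dword 19 of the kernarg segment, after
; %out and the [8 x i32] padding); the GEP's 2^34-byte offset is too large
; for any immediate form and needs 64-bit address arithmetic, hence the FIXME
; about the extra copies.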

; SMRD load with the largest possible immediate offset on VI.
; GCN-LABEL: {{^}}smrd4:
; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VIGFX9: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
define amdgpu_kernel void @smrd4(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 262143
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
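
; Worked offset for smrd4: index 262143 gives byte offset 262143 * 4 =
; 0xffffc, the largest dword-aligned value in VI/GFX9's 20-bit field. CI
; encodes 0x3ffff dwords; SI must materialize the byte offset in an SGPR.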

; SMRD load with an offset greater than the largest possible immediate on VI.
; GCN-LABEL: {{^}}smrd5:
; SIVIGFX9: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
; SIVIGFX9: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
define amdgpu_kernel void @smrd5(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 262144
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
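
; Worked offset for smrd5: index 262144 gives byte offset 0x100000, one past
; the 20-bit immediate, so SI, VI and GFX9 all fall back to an SGPR offset;
; only CI's 32-bit literal dword offset (0x40000) still encodes it directly.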

; GCN-LABEL: {{^}}smrd_hazard:
; GCN-DAG: s_mov_b32 s3, 3
; GCN-DAG: s_mov_b32 s2, 2
; GCN-DAG: s_mov_b32 s1, 1
; GCN-DAG: s_mov_b32 s0, 0
; SI-NEXT: s_nop 3
; GCN-NEXT: s_buffer_load_dword s0, s[0:3], 0x0
define amdgpu_ps float @smrd_hazard(<4 x i32> inreg %desc) #0 {
main_body:
  %d0 = insertelement <4 x i32> undef, i32 0, i32 0
  %d1 = insertelement <4 x i32> %d0, i32 1, i32 1
  %d2 = insertelement <4 x i32> %d1, i32 2, i32 2
  %d3 = insertelement <4 x i32> %d2, i32 3, i32 3
  %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %d3, i32 0)
  ret float %r
}
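
; Note on smrd_hazard: on SI an SMRD reading SGPRs shortly after they are
; written needs extra wait states, which the compiler fills with s_nop 3
; before the s_buffer_load; later generations presumably interlock this in
; hardware, so only SI has a nop check.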

; SMRD load using the load.const.v4i32 intrinsic with an immediate offset.
; GCN-LABEL: {{^}}smrd_load_const0:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VIGFX9: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
define amdgpu_ps void @smrd_load_const0(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}
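
; The smrd_load_const0..4 series below mirrors smrd0..smrd5 with
; s_buffer_load_dword through the llvm.SI.load.const.v4i32 intrinsic, which
; takes a byte offset directly; the same dword-vs-byte immediate rules apply
; (e.g. byte offset 16 encodes as 0x4 dwords on SI/CI and 0x10 bytes on
; VI/GFX9).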

; SMRD load using the load.const.v4i32 intrinsic with the largest possible immediate
; offset.
; GCN-LABEL: {{^}}smrd_load_const1:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VIGFX9: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define amdgpu_ps void @smrd_load_const1(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1020)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with an offset greater than
; the largest possible immediate offset.
; GCN-LABEL: {{^}}smrd_load_const2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VIGFX9: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
define amdgpu_ps void @smrd_load_const2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1024)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with the largest possible
; immediate offset on VI.
; GCN-LABEL: {{^}}smrd_load_const3:
; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VIGFX9: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
define amdgpu_ps void @smrd_load_const3(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048572)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with an offset greater than
; the largest possible immediate on VI.
; GCN-LABEL: {{^}}smrd_load_const4:
; SIVIGFX9: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
; SIVIGFX9: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
define amdgpu_ps void @smrd_load_const4(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048576)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; GCN-LABEL: {{^}}smrd_sgpr_offset:
; GCN: s_buffer_load_dword s{{[0-9]}}, s[0:3], s4
define amdgpu_ps float @smrd_sgpr_offset(<4 x i32> inreg %desc, i32 inreg %offset) #0 {
main_body:
  %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %offset)
  ret float %r
}
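
; A uniform (SGPR) offset can feed the scalar buffer load's soffset operand
; directly, so the intrinsic's %offset (s4) is used as-is with no copies or
; immediates.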

; GCN-LABEL: {{^}}smrd_vgpr_offset:
; GCN: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
  %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %offset)
  ret float %r
}
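
; A divergent (VGPR) offset cannot be used by a scalar memory instruction, so
; selection falls back to a MUBUF buffer_load_dword with the offset in v0
; ("offen").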

; GCN-LABEL: {{^}}smrd_vgpr_offset_imm:
; GCN-NEXT: %bb.
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ;
define amdgpu_ps float @smrd_vgpr_offset_imm(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
  %off = add i32 %offset, 4095
  %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %off)
  ret float %r
}
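
; 4095 is the largest value that fits the MUBUF 12-bit immediate offset
; field, so the add folds into "offen offset:4095".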

; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large:
; GCN-NEXT: %bb.
; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
  %off = add i32 %offset, 4096
  %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %off)
  ret float %r
}
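
; 4096 overflows the 12-bit MUBUF offset field, so 0x1000 is added into the
; VGPR first; the i/u and optional vcc in the regex cover the per-generation
; flavors of the VALU add.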

; GCN-LABEL: {{^}}smrd_imm_merged:
; GCN-NEXT: %bb.
; SICI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1
; SICI-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x7
; VIGFX9-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x4
; VIGFX9-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1c
define amdgpu_ps void @smrd_imm_merged(<4 x i32> inreg %desc) #0 {
main_body:
  %r1 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 4)
  %r2 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 8)
  %r3 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 12)
  %r4 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 16)
  %r5 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 28)
  %r6 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 32)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true) #0
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true) #0
  ret void
}
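
; Merging in smrd_imm_merged: byte offsets 4-16 are four consecutive dwords
; and combine into one s_buffer_load_dwordx4 (dword offset 0x1 on SI/CI, byte
; offset 0x4 on VI/GFX9); offsets 28 and 32 combine into a dwordx2 at 0x7
; dwords / 0x1c bytes. The gap at bytes 20-24 keeps the two groups apart.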

; GCN-LABEL: {{^}}smrd_imm_merge_m0:
;
; SICIVI: s_buffer_load_dwordx2
; SICIVI: s_mov_b32 m0
; SICIVI-DAG: v_interp_p1_f32
; SICIVI-DAG: v_interp_p1_f32
; SICIVI-DAG: v_interp_p1_f32
; SICIVI-DAG: v_interp_p2_f32
; SICIVI-DAG: v_interp_p2_f32
; SICIVI-DAG: v_interp_p2_f32
; SICIVI: s_mov_b32 m0
; SICIVI: v_movrels_b32_e32
;
; Merging is still thwarted on GFX9 due to s_set_gpr_idx
;
; GFX9: s_buffer_load_dword
; GFX9: s_buffer_load_dword
define amdgpu_ps float @smrd_imm_merge_m0(<4 x i32> inreg %desc, i32 inreg %prim, float %u, float %v) #0 {
main_body:
  %idx1.f = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 0)
  %idx1 = bitcast float %idx1.f to i32

  %v0.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 0, i32 %prim)
  %v0.x = call nsz float @llvm.amdgcn.interp.p2(float %v0.x1, float %v, i32 0, i32 0, i32 %prim)
  %v0.y1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 1, i32 %prim)
  %v0.y = call nsz float @llvm.amdgcn.interp.p2(float %v0.y1, float %v, i32 0, i32 1, i32 %prim)
  %v0.z1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 2, i32 %prim)
  %v0.z = call nsz float @llvm.amdgcn.interp.p2(float %v0.z1, float %v, i32 0, i32 2, i32 %prim)
  %v0.tmp0 = insertelement <3 x float> undef, float %v0.x, i32 0
  %v0.tmp1 = insertelement <3 x float> %v0.tmp0, float %v0.y, i32 1
  %v0 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2
  %a = extractelement <3 x float> %v0, i32 %idx1

  %v1.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 0, i32 %prim)
  %v1.x = call nsz float @llvm.amdgcn.interp.p2(float %v1.x1, float %v, i32 1, i32 0, i32 %prim)
  %v1.y1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 1, i32 %prim)
  %v1.y = call nsz float @llvm.amdgcn.interp.p2(float %v1.y1, float %v, i32 1, i32 1, i32 %prim)
  %v1.z1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 2, i32 %prim)
  %v1.z = call nsz float @llvm.amdgcn.interp.p2(float %v1.z1, float %v, i32 1, i32 2, i32 %prim)
  %v1.tmp0 = insertelement <3 x float> undef, float %v0.x, i32 0
  %v1.tmp1 = insertelement <3 x float> %v0.tmp0, float %v0.y, i32 1
  %v1 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2

  %b = extractelement <3 x float> %v1, i32 %idx1
  %c = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 4)

  %res.tmp = fadd float %a, %b
  %res = fadd float %res.tmp, %c
  ret float %res
}
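
; Note on smrd_imm_merge_m0: on SI/CI/VI the indexed extracts go through
; m0 + v_movrels, and the two scalar buffer loads (byte offsets 0 and 4) can
; still be merged into a dwordx2 ahead of the interpolation; on GFX9 the
; s_set_gpr_idx sequence sits between them and blocks the merge, as the
; comment above says.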

; GCN-LABEL: {{^}}smrd_vgpr_merged:
; GCN-NEXT: %bb.
; GCN-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
; GCN-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
define amdgpu_ps void @smrd_vgpr_merged(<4 x i32> inreg %desc, i32 %a) #0 {
main_body:
  %a1 = add i32 %a, 4
  %a2 = add i32 %a, 8
  %a3 = add i32 %a, 12
  %a4 = add i32 %a, 16
  %a5 = add i32 %a, 28
  %a6 = add i32 %a, 32
  %r1 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a1)
  %r2 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a2)
  %r3 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a3)
  %r4 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a4)
  %r5 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a5)
  %r6 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a6)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true) #0
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true) #0
  ret void
}
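
; smrd_vgpr_merged is the divergent counterpart of smrd_imm_merged: the same
; dwordx4 + dwordx2 grouping happens on the MUBUF path, with the constant
; parts of the offsets (4 and 28) folded into the immediate offset field.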

; GCN-LABEL: {{^}}smrd_sgpr_descriptor_promoted
; GCN: v_readfirstlane
define amdgpu_cs void @smrd_sgpr_descriptor_promoted([0 x i8] addrspace(4)* inreg noalias dereferenceable(18446744073709551615), i32) #0 {
main_body:
  %descptr = bitcast [0 x i8] addrspace(4)* %0 to <4 x i32> addrspace(4)*, !amdgpu.uniform !0
  br label %.outer_loop_header

ret_block:                                       ; preds = %.outer_loop_body, %.inner_loop_header
  ret void

.outer_loop_header:
  br label %.inner_loop_header

.inner_loop_header:                                     ; preds = %.inner_loop_body, %.outer_loop_header
  %loopctr.1 = phi i32 [ 0, %.outer_loop_header ], [ %loopctr.2, %.inner_loop_body ]
  %loopctr.2 = add i32 %loopctr.1, 1
  %inner_br1 = icmp slt i32 %loopctr.2, 10
  br i1 %inner_br1, label %.inner_loop_body, label %ret_block

.inner_loop_body:
  %descriptor = load <4 x i32>, <4 x i32> addrspace(4)* %descptr, align 16, !invariant.load !0
  %load1result = call float @llvm.SI.load.const.v4i32(<4 x i32> %descriptor, i32 0)
  store float %load1result, float addrspace(1)* undef
  %inner_br2 = icmp uge i32 %1, 10
  br i1 %inner_br2, label %.inner_loop_header, label %.outer_loop_body

.outer_loop_body:
  %offset = shl i32 %loopctr.2, 6
  %load2result = call float @llvm.SI.load.const.v4i32(<4 x i32> %descriptor, i32 %offset)
  %outer_br = fcmp ueq float %load2result, 0x0
  br i1 %outer_br, label %.outer_loop_header, label %ret_block
}
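
; Note on smrd_sgpr_descriptor_promoted: the descriptor load sits under
; control flow that branches on the VGPR argument %1, so its value is
; nominally divergent; thanks to !amdgpu.uniform and !invariant.load it can
; still feed the scalar buffer load, which is (presumably) why codegen moves
; it into SGPRs with the v_readfirstlane checked above.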
341
342declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
343declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
344declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #2
345declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #2
346
347attributes #0 = { nounwind }
348attributes #1 = { nounwind readnone }
349attributes #2 = { nounwind readnone speculatable }
350
351!0 = !{}
352