; FIXME: The si scheduler crashes when lane mask tracking is enabled, so
; we need to disable lane mask tracking when the si scheduler is being used.
; The only way the subtarget knows that the si machine scheduler is being used
; is to specify -mattr=si-scheduler.  If we just pass --misched=si, the backend
; won't know which scheduler we are using.
; RUN: llc -march=amdgcn --misched=si -mattr=si-scheduler < %s | FileCheck %s

; The test checks that the "si" machine scheduler pass works correctly.

; CHECK-LABEL: {{^}}main:
; CHECK: s_wqm
; CHECK: s_load_dwordx4
; CHECK: s_load_dwordx8
; CHECK: s_waitcnt lgkmcnt(0)
; CHECK: image_sample
; CHECK: s_waitcnt vmcnt(0)
; CHECK: exp
; CHECK: s_endpgm
define amdgpu_ps void @main([6 x <16 x i8>] addrspace(4)* byval %arg, [17 x <16 x i8>] addrspace(4)* byval %arg1, [17 x <4 x i32>] addrspace(4)* byval %arg2, [34 x <8 x i32>] addrspace(4)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
main_body:
  %tmp = bitcast [34 x <8 x i32>] addrspace(4)* %arg3 to <32 x i8> addrspace(4)*
  %tmp22 = load <32 x i8>, <32 x i8> addrspace(4)* %tmp, align 32, !tbaa !0
  %tmp23 = bitcast [17 x <4 x i32>] addrspace(4)* %arg2 to <16 x i8> addrspace(4)*
  %tmp24 = load <16 x i8>, <16 x i8> addrspace(4)* %tmp23, align 16, !tbaa !0
  %i.i = extractelement <2 x i32> %arg11, i32 0
  %j.i = extractelement <2 x i32> %arg11, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg5) #1
  %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg5) #1
  %i.i1 = extractelement <2 x i32> %arg11, i32 0
  %j.i2 = extractelement <2 x i32> %arg11, i32 1
  %i.f.i3 = bitcast i32 %i.i1 to float
  %j.f.i4 = bitcast i32 %j.i2 to float
  %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 1, i32 0, i32 %arg5) #1
  %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 1, i32 0, i32 %arg5) #1
  %tmp22.bc = bitcast <32 x i8> %tmp22 to <8 x i32>
  %tmp24.bc = bitcast <16 x i8> %tmp24 to <4 x i32>
  %tmp31 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %p2.i, float %p2.i6, <8 x i32> %tmp22.bc, <4 x i32> %tmp24.bc, i1 false, i32 0, i32 0)
  %tmp32 = extractelement <4 x float> %tmp31, i32 0
  %tmp33 = extractelement <4 x float> %tmp31, i32 1
  %tmp34 = extractelement <4 x float> %tmp31, i32 2
  %tmp35 = extractelement <4 x float> %tmp31, i32 3
  %tmp36 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp32, float %tmp33)
  %tmp38 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp34, float %tmp35)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp36, <2 x half> %tmp38, i1 true, i1 false) #0
  ret void
}

declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind readonly }

!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
!2 = !{!"tbaa root"}