; RUN: opt -mtriple=amdgcn--amdhsa -data-layout=A5 -O3 -S -inline-threshold=1 < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-INL1 %s
; RUN: opt -mtriple=amdgcn--amdhsa -data-layout=A5 -O3 -S < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-INLDEF %s

; Small coldcc function; with -inline-threshold=1 the GCN-INL1 run expects
; the call to this function to remain (see CHECKs in @test_inliner), while
; the default-threshold run expects it inlined.
define coldcc float @foo(float %x, float %y) {
entry:
  %cmp = fcmp ogt float %x, 0.000000e+00
  %div = fdiv float %y, %x
  %mul = fmul float %x, %y
  %cond = select i1 %cmp, float %div, float %mul
  ret float %cond
}

; Cold callee taking a pointer into private (addrspace(5)) memory. The
; CHECKs in @test_inliner expect its fdiv to appear inlined in the caller.
define coldcc void @foo_private_ptr(float addrspace(5)* nocapture %p) {
entry:
  %tmp1 = load float, float addrspace(5)* %p, align 4
  %cmp = fcmp ogt float %tmp1, 1.000000e+00
  br i1 %cmp, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  %div = fdiv float 1.000000e+00, %tmp1
  store float %div, float addrspace(5)* %p, align 4
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
}

; Cold callee taking two private (addrspace(5)) pointers; uses the constant
; 2.0 in its fdiv so the inlined copy is distinguishable in the CHECKs.
define coldcc void @foo_private_ptr2(float addrspace(5)* nocapture %p1, float addrspace(5)* nocapture %p2) {
entry:
  %tmp1 = load float, float addrspace(5)* %p1, align 4
  %cmp = fcmp ogt float %tmp1, 1.000000e+00
  br i1 %cmp, label %if.then, label %if.end

if.then:
  %div = fdiv float 2.000000e+00, %tmp1
  store float %div, float addrspace(5)* %p2, align 4
  br label %if.end

if.end:
  ret void
}

; Thin wrapper around @_Z3sinf; the CHECKs expect it to be inlined so the
; tail call to @_Z3sinf appears directly in @test_inliner.
define coldcc float @sin_wrapper(float %x) {
bb:
  %call = tail call float @_Z3sinf(float %x)
  ret float %call
}

; Marked noinline via attribute group #0; the CHECKs expect the call to this
; function to survive in @test_inliner under both RUN configurations.
define void @foo_noinline(float addrspace(5)* nocapture %p) #0 {
entry:
  %tmp1 = load float, float addrspace(5)* %p, align 4
  %mul = fmul float %tmp1, 2.000000e+00
  store float %mul, float addrspace(5)* %p, align 4
  ret void
}

; Kernel driving all the callees above through a private alloca.
; - @foo: stays a call at threshold 1 (GCN-INL1), inlined by default (GCN-INLDEF).
; - @foo_private_ptr / @foo_private_ptr2: inlined (their fdivs appear here).
; - @foo_noinline: call remains (noinline attribute).
; - @sin_wrapper: inlined down to the bare @_Z3sinf tail call.
; GCN: define amdgpu_kernel void @test_inliner(
; GCN-INL1:   %c1 = tail call coldcc float @foo(
; GCN-INLDEF: %cmp.i = fcmp ogt float %tmp2, 0.000000e+00
; GCN:        %div.i{{[0-9]*}} = fdiv float 1.000000e+00, %c
; GCN:        %div.i{{[0-9]*}} = fdiv float 2.000000e+00, %tmp1.i
; GCN:        call void @foo_noinline(
; GCN:        tail call float @_Z3sinf(
define amdgpu_kernel void @test_inliner(float addrspace(1)* nocapture %a, i32 %n) {
entry:
  %pvt_arr = alloca [64 x float], align 4, addrspace(5)
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %arrayidx = getelementptr inbounds float, float addrspace(1)* %a, i32 %tid
  %tmp2 = load float, float addrspace(1)* %arrayidx, align 4
  %add = add i32 %tid, 1
  %arrayidx2 = getelementptr inbounds float, float addrspace(1)* %a, i32 %add
  %tmp5 = load float, float addrspace(1)* %arrayidx2, align 4
  %c1 = tail call coldcc float @foo(float %tmp2, float %tmp5)
  %or = or i32 %tid, %n
  %arrayidx5 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
  store float %c1, float addrspace(5)* %arrayidx5, align 4
  %arrayidx7 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
  call coldcc void @foo_private_ptr(float addrspace(5)* %arrayidx7)
  %arrayidx8 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 1
  %arrayidx9 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 2
  call coldcc void @foo_private_ptr2(float addrspace(5)* %arrayidx8, float addrspace(5)* %arrayidx9)
  call void @foo_noinline(float addrspace(5)* %arrayidx7)
  %and = and i32 %tid, %n
  %arrayidx11 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %and
  %tmp12 = load float, float addrspace(5)* %arrayidx11, align 4
  %c2 = call coldcc float @sin_wrapper(float %tmp12)
  store float %c2, float addrspace(5)* %arrayidx7, align 4
  %xor = xor i32 %tid, %n
  %arrayidx16 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %xor
  %tmp16 = load float, float addrspace(5)* %arrayidx16, align 4
  store float %tmp16, float addrspace(1)* %arrayidx, align 4
  ret void
}

; Kernel passing pointers into two separate private arrays (32 + 32 floats)
; to @foo_private_ptr2; the CHECK expects the call to be inlined (its
; distinctive fdiv by 2.0 appears in the kernel body).
; GCN: define amdgpu_kernel void @test_inliner_multi_pvt_ptr(
; GCN: %div.i{{[0-9]*}} = fdiv float 2.000000e+00, %tmp1.i
define amdgpu_kernel void @test_inliner_multi_pvt_ptr(float addrspace(1)* nocapture %a, i32 %n, float %v) {
entry:
  %pvt_arr1 = alloca [32 x float], align 4, addrspace(5)
  %pvt_arr2 = alloca [32 x float], align 4, addrspace(5)
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %arrayidx = getelementptr inbounds float, float addrspace(1)* %a, i32 %tid
  %or = or i32 %tid, %n
  %arrayidx4 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr1, i32 0, i32 %or
  %arrayidx5 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr2, i32 0, i32 %or
  store float %v, float addrspace(5)* %arrayidx4, align 4
  store float %v, float addrspace(5)* %arrayidx5, align 4
  %arrayidx8 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr1, i32 0, i32 1
  %arrayidx9 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr2, i32 0, i32 2
  call coldcc void @foo_private_ptr2(float addrspace(5)* %arrayidx8, float addrspace(5)* %arrayidx9)
  %xor = xor i32 %tid, %n
  %arrayidx15 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr1, i32 0, i32 %xor
  %arrayidx16 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr2, i32 0, i32 %xor
  %tmp15 = load float, float addrspace(5)* %arrayidx15, align 4
  %tmp16 = load float, float addrspace(5)* %arrayidx16, align 4
  %tmp17 = fadd float %tmp15, %tmp16
  store float %tmp17, float addrspace(1)* %arrayidx, align 4
  ret void
}

; Same shape as @test_inliner_multi_pvt_ptr but with 32 + 33 floats of
; private storage. At -inline-threshold=1 (GCN-INL1) the CHECK expects the
; call to @foo_private_ptr2 to remain; at the default threshold
; (GCN-INLDEF) it is still inlined — NOTE(review): this pair of CHECKs
; pins a size-based cutoff in the AMDGPU inliner; confirm against the pass
; before changing the array sizes.
; GCN: define amdgpu_kernel void @test_inliner_multi_pvt_ptr_cutoff(
; GCN-INL1:   call coldcc void @foo_private_ptr2
; GCN-INLDEF: %div.i{{[0-9]*}} = fdiv float 2.000000e+00, %tmp1.i
define amdgpu_kernel void @test_inliner_multi_pvt_ptr_cutoff(float addrspace(1)* nocapture %a, i32 %n, float %v) {
entry:
  %pvt_arr1 = alloca [32 x float], align 4, addrspace(5)
  %pvt_arr2 = alloca [33 x float], align 4, addrspace(5)
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %arrayidx = getelementptr inbounds float, float addrspace(1)* %a, i32 %tid
  %or = or i32 %tid, %n
  %arrayidx4 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr1, i32 0, i32 %or
  %arrayidx5 = getelementptr inbounds [33 x float], [33 x float] addrspace(5)* %pvt_arr2, i32 0, i32 %or
  store float %v, float addrspace(5)* %arrayidx4, align 4
  store float %v, float addrspace(5)* %arrayidx5, align 4
  %arrayidx8 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr1, i32 0, i32 1
  %arrayidx9 = getelementptr inbounds [33 x float], [33 x float] addrspace(5)* %pvt_arr2, i32 0, i32 2
  call coldcc void @foo_private_ptr2(float addrspace(5)* %arrayidx8, float addrspace(5)* %arrayidx9)
  %xor = xor i32 %tid, %n
  %arrayidx15 = getelementptr inbounds [32 x float], [32 x float] addrspace(5)* %pvt_arr1, i32 0, i32 %xor
  %arrayidx16 = getelementptr inbounds [33 x float], [33 x float] addrspace(5)* %pvt_arr2, i32 0, i32 %xor
  %tmp15 = load float, float addrspace(5)* %arrayidx15, align 4
  %tmp16 = load float, float addrspace(5)* %arrayidx16, align 4
  %tmp17 = fadd float %tmp15, %tmp16
  store float %tmp17, float addrspace(1)* %arrayidx, align 4
  ret void
}

; External declarations and attribute groups referenced above.
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @_Z3sinf(float) #1

attributes #0 = { noinline }
attributes #1 = { nounwind readnone }
