; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,NOHSA %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,HSA %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,HSA %s

; RUN: llc -global-isel -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,NOHSA %s
; RUN: llc -global-isel -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,HSA %s
; RUN: llc -global-isel -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,HSA %s
8
9@lds0 = addrspace(3) global [512 x float] undef, align 4
10@lds1 = addrspace(3) global [256 x float] undef, align 4
11
12@large = addrspace(3) global [4096 x i32] undef, align 4
13
; CHECK-LABEL: {{^}}groupstaticsize_test0:
; NOHSA: v_mov_b32_e32 v{{[0-9]+}}, llvm.amdgcn.groupstaticsize@abs32@lo
; HSA: v_mov_b32_e32 v{{[0-9]+}}, 0x800{{$}}
17define amdgpu_kernel void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
18  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
19  %idx.0 = add nsw i32 %tid.x, 64
20  %static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
21  store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
22  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
23  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
24  store float %val0, float addrspace(1)* %out, align 4
25
26  ret void
27}
28
; CHECK-LABEL: {{^}}groupstaticsize_test1:
; NOHSA: v_mov_b32_e32 v{{[0-9]+}}, llvm.amdgcn.groupstaticsize@abs32@lo
; HSA: v_mov_b32_e32 v{{[0-9]+}}, 0xc00{{$}}
32define amdgpu_kernel void @groupstaticsize_test1(float addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %lds_size) {
33entry:
34  %static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
35  store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
36  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
37  %idx.0 = add nsw i32 %tid.x, 64
38  %tmp = icmp eq i32 %cond, 0
39  br i1 %tmp, label %if, label %else
40
41if:                                               ; preds = %entry
42  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
43  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
44  store float %val0, float addrspace(1)* %out, align 4
45  br label %endif
46
47else:                                             ; preds = %entry
48  %arrayidx1 = getelementptr inbounds [256 x float], [256 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
49  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
50  store float %val1, float addrspace(1)* %out, align 4
51  br label %endif
52
53endif:                                            ; preds = %else, %if
54  ret void
55}
56
; Exceeds 16-bit simm limit of s_movk_i32
; CHECK-LABEL: {{^}}large_groupstaticsize:
; NOHSA: v_mov_b32_e32 v{{[0-9]+}}, llvm.amdgcn.groupstaticsize@abs32@lo
; HSA: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4000{{$}}
61define amdgpu_kernel void @large_groupstaticsize(i32 addrspace(1)* %size, i32 %idx) #0 {
62  %gep = getelementptr inbounds [4096 x i32], [4096 x i32] addrspace(3)* @large, i32 0, i32 %idx
63  store volatile i32 0, i32 addrspace(3)* %gep
64  %static_lds_size = call i32 @llvm.amdgcn.groupstaticsize()
65  store i32 %static_lds_size, i32 addrspace(1)* %size
66  ret void
67}
68
69declare i32 @llvm.amdgcn.groupstaticsize() #1
70declare i32 @llvm.amdgcn.workitem.id.x() #1
71
72attributes #0 = { nounwind }
73attributes #1 = { nounwind readnone }
74