; RUN: opt -mtriple=amdgcn-- -analyze -divergence -use-gpu-divergence-analysis %s | FileCheck %s

; ds_swizzle exchanges values across lanes in a wavefront, so its result
; must be reported as divergent even though both operands may be uniform.
; CHECK: DIVERGENT: %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
define amdgpu_kernel void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) #0 {
  %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
  store i32 %swizzle, i32 addrspace(1)* %out, align 4
  ret void
}

; permlane16 permutes values across lanes, so its result is divergent.
; CHECK: DIVERGENT: %v = call i32 @llvm.amdgcn.permlane16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
define amdgpu_kernel void @v_permlane16_b32(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) #0 {
  %v = call i32 @llvm.amdgcn.permlane16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
  store i32 %v, i32 addrspace(1)* %out
  ret void
}

; permlanex16 permutes values across the paired 16-lane groups; divergent result.
; CHECK: DIVERGENT: %v = call i32 @llvm.amdgcn.permlanex16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
define amdgpu_kernel void @v_permlanex16_b32(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) #0 {
  %v = call i32 @llvm.amdgcn.permlanex16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
  store i32 %v, i32 addrspace(1)* %out
  ret void
}

; DPP (data-parallel primitives) moves data between lanes; divergent result.
; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 false) #0
define amdgpu_kernel void @update_dpp(i32 addrspace(1)* %out, i32 %in1, i32 %in2) #0 {
  %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 false) #0
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

; mov.dpp reads from another lane's register; divergent result.
; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 true) #0
define amdgpu_kernel void @mov_dpp(i32 addrspace(1)* %out, i32 %in) #0 {
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 true) #0
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

; mov.dpp8 shuffles within groups of 8 lanes; divergent result.
; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %in, i32 1) #0
define amdgpu_kernel void @mov_dpp8(i32 addrspace(1)* %out, i32 %in) #0 {
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %in, i32 1) #0
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

; writelane only writes one lane of the output; the other lanes keep their
; old (per-lane) values, so the result is divergent even with constant args.
; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.writelane(i32 0, i32 1, i32 2)
define amdgpu_kernel void @writelane(i32 addrspace(1)* %out) #0 {
  %tmp0 = call i32 @llvm.amdgcn.writelane(i32 0, i32 1, i32 2)
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

; Declarations for the cross-lane intrinsics exercised above.
declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
declare i32 @llvm.amdgcn.permlane16(i32, i32, i32, i32, i1, i1) #1
declare i32 @llvm.amdgcn.permlanex16(i32, i32, i32, i32, i1, i1) #1
declare i32 @llvm.amdgcn.mov.dpp.i32(i32, i32, i32, i32, i1) #1
declare i32 @llvm.amdgcn.mov.dpp8.i32(i32, i32) #1
declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #1
declare i32 @llvm.amdgcn.writelane(i32, i32, i32) #1

attributes #0 = { nounwind convergent }
attributes #1 = { nounwind readnone convergent }