1; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,MOVREL,PREGFX9 %s
2; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,MOVREL,PREGFX9 %s
3; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-vgpr-index-mode -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,IDXMODE,PREGFX9 %s
4
5; Tests for indirect addressing on SI, which is implemented using dynamic
6; indexing of vectors.
7
; Subtest below was moved from test/CodeGen/AMDGPU/indirect-addressing-si.ll
; to avoid gfx9 scheduling-induced issues.
10
11
12; GCN-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
13; GCN-DAG: s_load_dwordx16 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT15:[0-9]+]]{{\]}}
14; GCN-DAG: {{buffer|flat|global}}_load_dword [[IDX0:v[0-9]+]]
15; GCN-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62
16
17; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT15:[0-9]+]], s[[S_ELT15]]
18; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
19
20; GCN-DAG: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], vcc, 1, [[IDX0]]
21
22; GCN: v_cmp_eq_u32_e32
23; GCN-COUNT-32: v_cndmask_b32
24
25; GCN-COUNT-4: buffer_store_dwordx4
; Regression test: two dynamic-index insertelements (indices %idx0 and
; %idx0+1, both held in VGPRs) in the same basic block, plus a value
; (%live.out.val) that must stay live across both expansions into bb1.
define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<16 x i32> addrspace(1)* %out0, <16 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <16 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  ; The index is loaded (volatile) from memory, so it is per-lane divergent
  ; and lives in a VGPR rather than an SGPR.
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  ; Inline asm pins the constant 62 in a VGPR; its reuse in bb1 below keeps
  ; the value live across both indirect-insert expansions.
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  ; Two inserts with VGPR indices in one block; per the CHECK lines above,
  ; these lower to v_cmp/v_cndmask sequences (32 v_cndmask_b32 total for
  ; the two 16-element inserts).
  %vec1 = insertelement <16 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <16 x i32> %vec1, i32 63, i32 %idx1
  store volatile <16 x i32> %vec2, <16 x i32> addrspace(1)* %out0
  ; Branch on lane id so %live.out.val has a use in a later block.
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}
47
; Intrinsic declarations and attribute groups used by the kernel above.
declare i32 @llvm.amdgcn.workitem.id.x() #1
; NOTE(review): s.barrier is declared but never called in this file —
; presumably left over from the original test; confirm before removing.
declare void @llvm.amdgcn.s.barrier() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind convergent }
54