; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefixes=SI,FUNC,GFX7 %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=SI,FUNC,GFX8 %s

; On Southern Islands GPUs the local address space(3) uses 32-bit pointers and
; the global address space(1) uses 64-bit pointers.  These tests check to make sure
; the correct pointer size is used for the local address space.

; The e{{32|64}} suffix on the instructions refers to the encoding size and not
; the size of the operands.  The operand size is denoted in the instruction name.
; Instructions with B32, U32, and I32 in their name take 32-bit operands, while
; instructions with B64, U64, and I64 take 64-bit operands.

; A plain LDS load: the 32-bit pointer is moved into a single VGPR and used
; directly as the ds_read address.
; FUNC-LABEL: {{^}}local_address_load:
; SI: v_mov_b32_e{{32|64}} [[PTR:v[0-9]]]
; SI: ds_read_b32 v{{[0-9]+}}, [[PTR]]
define amdgpu_kernel void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
  %val = load i32, i32 addrspace(3)* %in
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
; A variable offset is added with a single 32-bit scalar add (not a 64-bit
; pointer add), then the result is copied to a VGPR for the LDS read.
; FUNC-LABEL: {{^}}local_address_gep:
; SI: s_add_i32 [[SPTR:s[0-9]]]
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_read_b32 [[VPTR]]
define amdgpu_kernel void @local_address_gep(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %offset) {
entry:
  %gep = getelementptr i32, i32 addrspace(3)* %in, i32 %offset
  %val = load i32, i32 addrspace(3)* %gep
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
; A small constant offset (4 bytes) folds into the ds_read_b32 immediate
; offset field instead of being added to the pointer.
; FUNC-LABEL: {{^}}local_address_gep_const_offset:
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
; SI: ds_read_b32 v{{[0-9]+}}, [[VPTR]] offset:4
define amdgpu_kernel void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(3)* %in, i32 1
  %val = load i32, i32 addrspace(3)* %gep
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
; Offset too large, can't fold into 16-bit immediate offset.
; 16385 * 4 = 0x10004 bytes, so the add must be done explicitly in SALU.
; FUNC-LABEL: {{^}}local_address_gep_large_const_offset:
; SI: s_add_i32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_read_b32 [[VPTR]]
define amdgpu_kernel void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(3)* %in, i32 16385
  %val = load i32, i32 addrspace(3)* %gep
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
; Null comparison of a 32-bit LDS pointer should use a 32-bit compare.
; NOTE: the GFX7 v_cmp_ne_u32 line previously lacked the ':' after the prefix,
; so FileCheck silently ignored it; fixed so the check actually runs.
; FUNC-LABEL: {{^}}null_32bit_lds_ptr:
; GFX7: v_cmp_ne_u32
; GFX7: v_cndmask_b32
; GFX8: s_cmp_lg_u32
; GFX8-NOT: v_cmp_ne_u32
; GFX8: s_cselect_b32
define amdgpu_kernel void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds) nounwind {
  %cmp = icmp ne i32 addrspace(3)* %lds, null
  %sel = select i1 %cmp, i32 123, i32 456
  store i32 %sel, i32 addrspace(1)* %out
  ret void
}
; Indexing an array of [3 x float] needs a 32-bit multiply + add for the
; address computation; no 64-bit pointer math should be emitted.
; FUNC-LABEL: {{^}}mul_32bit_ptr:
; SI: s_mul_i32
; SI-NEXT: s_add_i32
; SI: ds_read_b32
define amdgpu_kernel void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %lds, i32 %tid) {
  %ptr = getelementptr [3 x float], [3 x float] addrspace(3)* %lds, i32 %tid, i32 0
  %val = load float, float addrspace(3)* %ptr
  store float %val, float addrspace(1)* %out
  ret void
}
; Module-scope LDS variable used by infer_ptr_alignment_global_offset below.
@g_lds = addrspace(3) global float undef, align 4

; The address of a module-scope LDS variable materializes as a 32-bit
; absolute relocation moved into a single VGPR.
; FUNC-LABEL: {{^}}infer_ptr_alignment_global_offset:
; SI: v_mov_b32_e32 [[PTR:v[0-9]+]], g_lds@abs32@lo
; SI: ds_read_b32 v{{[0-9]+}}, [[PTR]]
define amdgpu_kernel void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
  %val = load float, float addrspace(3)* @g_lds
  store float %val, float addrspace(1)* %out
  ret void
}


; LDS-resident pointer-to-LDS and a large LDS array, used by @global_ptr.
@ptr = addrspace(3) global i32 addrspace(3)* undef
@dst = addrspace(3) global [16383 x i32] undef

; Storing the address of an LDS object through an LDS pointer is a single
; 32-bit ds_write (pointers in address space 3 are 32 bits wide).
; FUNC-LABEL: {{^}}global_ptr:
; SI: ds_write_b32
define amdgpu_kernel void @global_ptr() nounwind {
  store i32 addrspace(3)* getelementptr ([16383 x i32], [16383 x i32] addrspace(3)* @dst, i32 0, i32 16), i32 addrspace(3)* addrspace(3)* @ptr
  ret void
}

; A plain store through a 32-bit LDS pointer lowers to ds_write_b32.
; FUNC-LABEL: {{^}}local_address_store:
; SI: ds_write_b32
define amdgpu_kernel void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
  store i32 %val, i32 addrspace(3)* %out
  ret void
}

; Store counterpart of local_address_gep: variable offset added with a
; 32-bit scalar add, result copied to a VGPR as the ds_write address.
; FUNC-LABEL: {{^}}local_address_gep_store:
; SI: s_add_i32 [[SADDR:s[0-9]+]],
; SI: v_mov_b32_e32 [[ADDR:v[0-9]+]], [[SADDR]]
; SI: ds_write_b32 [[ADDR]], v{{[0-9]+}}
define amdgpu_kernel void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32 %offset) {
  %gep = getelementptr i32, i32 addrspace(3)* %out, i32 %offset
  store i32 %val, i32 addrspace(3)* %gep, align 4
  ret void
}

; Small constant offset folds into the ds_write_b32 immediate offset field.
; FUNC-LABEL: {{^}}local_address_gep_const_offset_store:
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
; SI: v_mov_b32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; SI: ds_write_b32 [[VPTR]], [[VAL]] offset:4
define amdgpu_kernel void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
  %gep = getelementptr i32, i32 addrspace(3)* %out, i32 1
  store i32 %val, i32 addrspace(3)* %gep, align 4
  ret void
}

; Offset too large, can't fold into 16-bit immediate offset.
; 16385 * 4 = 0x10004 bytes, so the pointer add is done explicitly in SALU.
; FUNC-LABEL: {{^}}local_address_gep_large_const_offset_store:
; SI: s_add_i32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_write_b32 [[VPTR]], v{{[0-9]+$}}
define amdgpu_kernel void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
  %gep = getelementptr i32, i32 addrspace(3)* %out, i32 16385
  store i32 %val, i32 addrspace(3)* %gep, align 4
  ret void
}
