; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE-VECT -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s

; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -data-layout=A5 -mcpu=kaveri -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck -enable-var-scope -check-prefix=HSAOPT -check-prefix=OPT %s
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -data-layout=A5 -mcpu=kaveri -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck -enable-var-scope -check-prefix=NOHSAOPT -check-prefix=OPT %s

; RUN: llc -march=r600 -mcpu=cypress -disable-promote-alloca-to-vector < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s -check-prefix=R600-VECT -check-prefix=FUNC

; HSAOPT: @mova_same_clause.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align 4
; HSAOPT: @high_alignment.stack = internal unnamed_addr addrspace(3) global [256 x [8 x i32]] undef, align 16


; FUNC-LABEL: {{^}}mova_same_clause:
; OPT-LABEL: @mova_same_clause(

; R600: LDS_WRITE
; R600: LDS_WRITE
; R600: LDS_READ
; R600: LDS_READ

; HSA-PROMOTE: .amd_kernel_code_t
; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
; HSA-PROMOTE: .end_amd_kernel_code_t

; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x2

; SI-PROMOTE: ds_write_b32
; SI-PROMOTE: ds_write_b32
; SI-PROMOTE: ds_read_b32
; SI-PROMOTE: ds_read_b32

; HSA-ALLOCA: .amd_kernel_code_t
; FIXME: Creating the emergency stack slots causes us to over-estimate scratch
; by 4 bytes.
; HSA-ALLOCA: workitem_private_segment_byte_size = 24
; HSA-ALLOCA: .end_amd_kernel_code_t

; HSA-ALLOCA: s_mov_b32 flat_scratch_lo, s7
; HSA-ALLOCA: s_add_u32 s6, s6, s9
; HSA-ALLOCA: s_lshr_b32 flat_scratch_hi, s6, 8

; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen ; encoding: [0x00,0x10,0x70,0xe0
; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen ; encoding: [0x00,0x10,0x70,0xe0


; HSAOPT: [[DISPATCH_PTR:%[0-9]+]] = call noalias nonnull dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
; HSAOPT: [[CAST_DISPATCH_PTR:%[0-9]+]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(4)* [[CAST_DISPATCH_PTR]], i64 1
; HSAOPT: [[LDXY:%[0-9]+]] = load i32, i32 addrspace(4)* [[GEP0]], align 4, !invariant.load !0
; HSAOPT: [[GEP1:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(4)* [[CAST_DISPATCH_PTR]], i64 2
; HSAOPT: [[LDZU:%[0-9]+]] = load i32, i32 addrspace(4)* [[GEP1]], align 4, !range !1, !invariant.load !0
; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16

; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !2
; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !2
; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !2

; HSAOPT: [[Y_SIZE_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[EXTRACTY]], [[LDZU]]
; HSAOPT: [[YZ_X_XID:%[0-9]+]] = mul i32 [[Y_SIZE_X_Z_SIZE]], [[WORKITEM_ID_X]]
; HSAOPT: [[Y_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[WORKITEM_ID_Y]], [[LDZU]]
; HSAOPT: [[ADD_YZ_X_X_YZ_SIZE:%[0-9]+]] = add i32 [[YZ_X_XID]], [[Y_X_Z_SIZE]]
; HSAOPT: [[ADD_ZID:%[0-9]+]] = add i32 [[ADD_YZ_X_X_YZ_SIZE]], [[WORKITEM_ID_Z]]

; HSAOPT: [[LOCAL_GEP:%[0-9]+]] = getelementptr inbounds [256 x [5 x i32]], [256 x [5 x i32]] addrspace(3)* @mova_same_clause.stack, i32 0, i32 [[ADD_ZID]]
; HSAOPT: %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
; HSAOPT: %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
; HSAOPT: %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 0
; HSAOPT: %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 1


; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !0
; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !0
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !1
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !1
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !1
; Stores to two runtime-variable indices of a 5 x i32 private array, then
; reads elements 0 and 1 back to %out. With promote-alloca the array is
; rewritten to a per-workitem slice of an LDS global.
define amdgpu_kernel void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4, addrspace(5)
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %0
  store i32 4, i32 addrspace(5)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %1
  store i32 5, i32 addrspace(5)* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 0
  %2 = load i32, i32 addrspace(5)* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 1
  %3 = load i32, i32 addrspace(5)* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; OPT-LABEL: @high_alignment(
; OPT: getelementptr inbounds [256 x [8 x i32]], [256 x [8 x i32]] addrspace(3)* @high_alignment.stack, i32 0, i32 %{{[0-9]+}}
; Same access pattern as @mova_same_clause but with an align-16 alloca;
; the 16-byte alignment must be preserved on the promoted LDS global.
define amdgpu_kernel void @high_alignment(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [8 x i32], align 16, addrspace(5)
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 %0
  store i32 4, i32 addrspace(5)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 %1
  store i32 5, i32 addrspace(5)* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 0
  %2 = load i32, i32 addrspace(5)* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 1
  %3 = load i32, i32 addrspace(5)* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; FUNC-LABEL: {{^}}no_replace_inbounds_gep:
; OPT-LABEL: @no_replace_inbounds_gep(
; OPT: alloca [5 x i32]

; SI-NOT: ds_write
; The first GEP into the alloca lacks "inbounds"; the alloca must NOT be
; promoted (the check above requires it stays an alloca with no ds_write).
define amdgpu_kernel void @no_replace_inbounds_gep(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4, addrspace(5)
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %0
  store i32 4, i32 addrspace(5)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %1
  store i32 5, i32 addrspace(5)* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 0
  %2 = load i32, i32 addrspace(5)* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 1
  %3 = load i32, i32 addrspace(5)* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; This test checks that the stack offset is calculated correctly for structs.
; All register loads/stores should be optimized away, so there shouldn't be
; any MOVA instructions.
;
; XXX: This generated code has unnecessary MOVs, we should be able to optimize
; this.

; FUNC-LABEL: {{^}}multiple_structs:
; OPT-LABEL: @multiple_structs(

; R600-NOT: MOVA_INT
; SI-NOT: v_movrel
; SI-NOT: v_movrel
; Simple pair-of-i32 aggregate used by @multiple_structs.
%struct.point = type { i32, i32 }

; Two distinct private structs, all accessed with constant indices only, so
; no indirect (MOVA/movrel) addressing should be needed.
define amdgpu_kernel void @multiple_structs(i32 addrspace(1)* %out) #0 {
entry:
  %a = alloca %struct.point, addrspace(5)
  %b = alloca %struct.point, addrspace(5)
  %a.x.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %a, i32 0, i32 0
  %a.y.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %a, i32 0, i32 1
  %b.x.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %b, i32 0, i32 0
  %b.y.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %b, i32 0, i32 1
  store i32 0, i32 addrspace(5)* %a.x.ptr
  store i32 1, i32 addrspace(5)* %a.y.ptr
  store i32 2, i32 addrspace(5)* %b.x.ptr
  store i32 3, i32 addrspace(5)* %b.y.ptr
  %a.indirect.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %a, i32 0, i32 0
  %b.indirect.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %b, i32 0, i32 0
  %a.indirect = load i32, i32 addrspace(5)* %a.indirect.ptr
  %b.indirect = load i32, i32 addrspace(5)* %b.indirect.ptr
  %0 = add i32 %a.indirect, %b.indirect
  store i32 %0, i32 addrspace(1)* %out
  ret void
}

; Test direct access of a private array inside a loop.  The private array
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.

; FUNC-LABEL: {{^}}direct_loop:
; R600-NOT: MOVA_INT
; SI-NOT: v_movrel

; Private arrays accessed only at constant index 0/1 inside a 4095-iteration
; loop; accumulates %prv_array_const[0] into %prv_array[0] each iteration.
define amdgpu_kernel void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
entry:
  %prv_array_const = alloca [2 x i32], addrspace(5)
  %prv_array = alloca [2 x i32], addrspace(5)
  %a = load i32, i32 addrspace(1)* %in
  %b_src_ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %b = load i32, i32 addrspace(1)* %b_src_ptr
  %a_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array_const, i32 0, i32 0
  store i32 %a, i32 addrspace(5)* %a_dst_ptr
  %b_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array_const, i32 0, i32 1
  store i32 %b, i32 addrspace(5)* %b_dst_ptr
  br label %for.body

for.body:
  %inc = phi i32 [0, %entry], [%count, %for.body]
  %x_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array_const, i32 0, i32 0
  %x = load i32, i32 addrspace(5)* %x_ptr
  %y_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array, i32 0, i32 0
  %y = load i32, i32 addrspace(5)* %y_ptr
  %xy = add i32 %x, %y
  store i32 %xy, i32 addrspace(5)* %y_ptr
  %count = add i32 %inc, 1
  %done = icmp eq i32 %count, 4095
  br i1 %done, label %for.end, label %for.body

for.end:
  %value_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array, i32 0, i32 0
  %value = load i32, i32 addrspace(5)* %value_ptr
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}short_array:

; R600-VECT: MOVA_INT

; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:6 ; encoding: [0x06,0x00,0x68,0xe0
; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x68,0xe0
; Loaded value is 0 or 1, so sext will become zext, so we get buffer_load_ushort instead of buffer_load_sshort.
; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0

; SI-PROMOTE-VECT: s_load_dword [[IDX:s[0-9]+]]
; SI-PROMOTE-VECT: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 4
; SI-PROMOTE-VECT: s_lshr_b32 [[SREG:s[0-9]+]], 0x10000, [[SCALED_IDX]]
; SI-PROMOTE-VECT: s_and_b32 s{{[0-9]+}}, [[SREG]], 0xffff
; Stores {0, 1} into a 2 x i16 private array, then sign-extends the element
; at a kernel-argument index into the i32 result.
define amdgpu_kernel void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i16], addrspace(5)
  %1 = getelementptr inbounds [2 x i16], [2 x i16] addrspace(5)* %0, i32 0, i32 0
  %2 = getelementptr inbounds [2 x i16], [2 x i16] addrspace(5)* %0, i32 0, i32 1
  store i16 0, i16 addrspace(5)* %1
  store i16 1, i16 addrspace(5)* %2
  %3 = getelementptr inbounds [2 x i16], [2 x i16] addrspace(5)* %0, i32 0, i32 %index
  %4 = load i16, i16 addrspace(5)* %3
  %5 = sext i16 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}char_array:

; R600-VECT: MOVA_INT

; SI-PROMOTE-VECT-DAG: s_lshl_b32
; SI-PROMOTE-VECT-DAG: v_lshrrev

; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x60,0xe0
; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:5 ; encoding: [0x05,0x00,0x60,0xe0
; Same shape as @short_array but with i8 elements: store {0, 1}, then
; sign-extend the dynamically indexed element to i32.
define amdgpu_kernel void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i8], addrspace(5)
  %1 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(5)* %0, i32 0, i32 0
  %2 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(5)* %0, i32 0, i32 1
  store i8 0, i8 addrspace(5)* %1
  store i8 1, i8 addrspace(5)* %2
  %3 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(5)* %0, i32 0, i32 %index
  %4 = load i8, i8 addrspace(5)* %3
  %5 = sext i8 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}

; Test that two stack objects are not stored in the same register
; The second stack object should be in T3.X
; FUNC-LABEL: {{^}}no_overlap:
; R600-CHECK: MOV
; R600-CHECK: [[CHAN:[XYZW]]]+
; R600-NOT: [[CHAN]]+
;
; A total of 5 bytes should be allocated and used.
; SI: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ;
; Two separate i8 arrays (3 + 2 bytes); both are indexed dynamically with
; %in, and the two loaded bytes are summed. Verifies the stack objects get
; distinct, non-overlapping storage.
define amdgpu_kernel void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
  %0 = alloca [3 x i8], align 1, addrspace(5)
  %1 = alloca [2 x i8], align 1, addrspace(5)
  %2 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 0
  %3 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 1
  %4 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 2
  %5 = getelementptr [2 x i8], [2 x i8] addrspace(5)* %1, i32 0, i32 0
  %6 = getelementptr [2 x i8], [2 x i8] addrspace(5)* %1, i32 0, i32 1
  store i8 0, i8 addrspace(5)* %2
  store i8 1, i8 addrspace(5)* %3
  store i8 2, i8 addrspace(5)* %4
  store i8 1, i8 addrspace(5)* %5
  store i8 0, i8 addrspace(5)* %6
  %7 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 %in
  %8 = getelementptr [2 x i8], [2 x i8] addrspace(5)* %1, i32 0, i32 %in
  %9 = load i8, i8 addrspace(5)* %7
  %10 = load i8, i8 addrspace(5)* %8
  %11 = add i8 %9, %10
  %12 = sext i8 %11 to i32
  store i32 %12, i32 addrspace(1)* %out
  ret void
}

; Nested i8 array: dynamic index into the inner [2 x i8] row 0, sign-extended
; to the i32 result.
define amdgpu_kernel void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i8]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]] addrspace(5)* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  store i8 0, i8 addrspace(5)* %gep0
  store i8 1, i8 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index
  %load = load i8, i8 addrspace(5)* %gep2
  %sext = sext i8 %load to i32
  store i32 %sext, i32 addrspace(1)* %out
  ret void
}

; Nested i32 array: store {0, 1} in row 0, then load row 0 at a dynamic index.
define amdgpu_kernel void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i32]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]] addrspace(5)* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  store i32 0, i32 addrspace(5)* %gep0
  store i32 1, i32 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index
  %load = load i32, i32 addrspace(5)* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

; Nested i64 array variant of the dynamic-index test.
define amdgpu_kernel void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i64]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]] addrspace(5)* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  store i64 0, i64 addrspace(5)* %gep0
  store i64 1, i64 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index
  %load = load i64, i64 addrspace(5)* %gep2
  store i64 %load, i64 addrspace(1)* %out
  ret void
}

; Pair-of-i32 aggregate used by the struct-array tests below.
%struct.pair32 = type { i32, i32 }

; Dynamic index into an array of structs nested inside another array; loads
; field 0 of the %index-th pair in row 0.
define amdgpu_kernel void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x %struct.pair32]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]] addrspace(5)* %alloca, i32 0, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]] addrspace(5)* %alloca, i32 0, i32 0, i32 1, i32 1
  store i32 0, i32 addrspace(5)* %gep0
  store i32 1, i32 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index, i32 0
  %load = load i32, i32 addrspace(5)* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

; Dynamic index into a flat array of %struct.pair32; loads field 0 of the
; %index-th pair.
define amdgpu_kernel void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x %struct.pair32], addrspace(5)
  %gep0 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32] addrspace(5)* %alloca, i32 0, i32 1, i32 0
  store i32 0, i32 addrspace(5)* %gep0
  store i32 1, i32 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32] addrspace(5)* %alloca, i32 0, i32 %index, i32 0
  %load = load i32, i32 addrspace(5)* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

; Selects between two private-memory pointers based on %in, then loads
; through the selected pointer.
define amdgpu_kernel void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
  %tmp = alloca [2 x i32], addrspace(5)
  %tmp1 = getelementptr [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 0
  %tmp2 = getelementptr [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 1
  store i32 0, i32 addrspace(5)* %tmp1
  store i32 1, i32 addrspace(5)* %tmp2
  %cmp = icmp eq i32 %in, 0
  %sel = select i1 %cmp, i32 addrspace(5)* %tmp1, i32 addrspace(5)* %tmp2
  %load = load i32, i32 addrspace(5)* %sel
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

; AMDGPUPromoteAlloca does not know how to handle ptrtoint.  When it
; finds one, it should stop trying to promote.

; FUNC-LABEL: ptrtoint:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; SI: v_add_{{[iu]}}32_e32 [[ADD_OFFSET:v[0-9]+]], vcc, 5,
; SI: buffer_load_dword v{{[0-9]+}}, [[ADD_OFFSET:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offen ;
; The alloca escapes through ptrtoint/inttoptr, so it must stay in scratch
; (no LDS promotion).
define amdgpu_kernel void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
  %alloca = alloca [16 x i32], addrspace(5)
  %tmp0 = getelementptr [16 x i32], [16 x i32] addrspace(5)* %alloca, i32 0, i32 %a
  store i32 5, i32 addrspace(5)* %tmp0
  %tmp1 = ptrtoint [16 x i32] addrspace(5)* %alloca to i32
  %tmp2 = add i32 %tmp1, 5
  %tmp3 = inttoptr i32 %tmp2 to i32 addrspace(5)*
  %tmp4 = getelementptr i32, i32 addrspace(5)* %tmp3, i32 %b
  %tmp5 = load i32, i32 addrspace(5)* %tmp4
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @pointer_typed_alloca(
; OPT:  getelementptr inbounds [256 x i32 addrspace(1)*], [256 x i32 addrspace(1)*] addrspace(3)* @pointer_typed_alloca.A.addr, i32 0, i32 %{{[0-9]+}}
; OPT: load i32 addrspace(1)*, i32 addrspace(1)* addrspace(3)* %{{[0-9]+}}, align 4
; An alloca holding a pointer value (i32 addrspace(1)*); must still be
; promotable to an LDS slot of pointer type.
define amdgpu_kernel void @pointer_typed_alloca(i32 addrspace(1)* %A) #1 {
entry:
  %A.addr = alloca i32 addrspace(1)*, align 4, addrspace(5)
  store i32 addrspace(1)* %A, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %ld0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %ld0, i32 0
  store i32 1, i32 addrspace(1)* %arrayidx, align 4
  %ld1 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %ld1, i32 1
  store i32 2, i32 addrspace(1)* %arrayidx1, align 4
  %ld2 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %ld2, i32 2
  store i32 3, i32 addrspace(1)* %arrayidx2, align 4
  ret void
}

; FUNC-LABEL: v16i32_stack:

; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword

; Loads a whole <16 x i32> from a dynamically indexed private array.
define amdgpu_kernel void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
  %alloca = alloca [2 x <16 x i32>], addrspace(5)
  %tmp0 = getelementptr [2 x <16 x i32>], [2 x <16 x i32>] addrspace(5)* %alloca, i32 0, i32 %a
  %tmp5 = load <16 x i32>, <16 x i32> addrspace(5)* %tmp0
  store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: v16float_stack:

; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword

; Float variant of @v16i32_stack.
define amdgpu_kernel void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
  %alloca = alloca [2 x <16 x float>], addrspace(5)
  %tmp0 = getelementptr [2 x <16 x float>], [2 x <16 x float>] addrspace(5)* %alloca, i32 0, i32 %a
  %tmp5 = load <16 x float>, <16 x float> addrspace(5)* %tmp0
  store <16 x float> %tmp5, <16 x float> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: v2float_stack:

; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword

; Small-vector (<2 x float>) dynamic-index load from private memory.
define amdgpu_kernel void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
  %alloca = alloca [16 x <2 x float>], addrspace(5)
  %tmp0 = getelementptr [16 x <2 x float>], [16 x <2 x float>] addrspace(5)* %alloca, i32 0, i32 %a
  %tmp5 = load <2 x float>, <2 x float> addrspace(5)* %tmp0
  store <2 x float> %tmp5, <2 x float> addrspace(1)* %out
  ret void
}

; OPT-LABEL: @direct_alloca_read_0xi32(
; OPT: store [0 x i32] undef, [0 x i32] addrspace(3)*
; OPT: load [0 x i32], [0 x i32] addrspace(3)*
; Zero-length array alloca; whole-aggregate store/load must survive
; promotion without crashing the pass.
define amdgpu_kernel void @direct_alloca_read_0xi32([0 x i32] addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [0 x i32], addrspace(5)
  store [0 x i32] [], [0 x i32] addrspace(5)* %tmp
  %load = load [0 x i32], [0 x i32] addrspace(5)* %tmp
  store [0 x i32] %load, [0 x i32] addrspace(1)* %out
  ret void
}

; OPT-LABEL: @direct_alloca_read_1xi32(
; OPT: store [1 x i32] zeroinitializer, [1 x i32] addrspace(3)*
; OPT: load [1 x i32], [1 x i32] addrspace(3)*
; Single-element array alloca with whole-aggregate store/load.
define amdgpu_kernel void @direct_alloca_read_1xi32([1 x i32] addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [1 x i32], addrspace(5)
  store [1 x i32] [i32 0], [1 x i32] addrspace(5)* %tmp
  %load = load [1 x i32], [1 x i32] addrspace(5)* %tmp
  store [1 x i32] %load, [1 x i32] addrspace(1)* %out
  ret void
}

; #0 bounds occupancy/work-group size so promote-alloca can prove the LDS
; budget; #1 only bounds the flat work-group size.
attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" "amdgpu-flat-work-group-size"="1,256" }
attributes #1 = { nounwind "amdgpu-flat-work-group-size"="1,256" }

; HSAOPT: !0 = !{}
; HSAOPT: !1 = !{i32 0, i32 257}
; HSAOPT: !2 = !{i32 0, i32 256}

; NOHSAOPT: !0 = !{i32 0, i32 257}
; NOHSAOPT: !1 = !{i32 0, i32 256}
