; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; The combine select c, (load x), (load y) -> load (select c, x, y)
; drops the MachinePointerInfo, so it can't be relied on for correctness.
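
; As an illustrative sketch (hypothetical function name, not covered by any
; CHECK lines in this file), the combined form selects the pointer first and
; then issues a single load:
define amdgpu_kernel void @select_ptr_combined_form_sketch(i32 %tmp, i64* %ptr0, i64* %ptr1, i64 addrspace(1)* %ptr2) {
  %cmp = icmp eq i32 %tmp, 0
  ; Select between the two pointers instead of between the two loaded values.
  %ptr = select i1 %cmp, i64* %ptr0, i64* %ptr1
  %val = load i64, i64* %ptr, align 8
  store i64 %val, i64 addrspace(1)* %ptr2, align 8
  ret void
}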

; GCN-LABEL: {{^}}select_ptr_crash_i64_flat:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2

; GCN: v_cmp_eq_u32
; GCN: v_cndmask_b32
; GCN: v_cndmask_b32

; GCN-NOT: load_dword
; GCN: flat_load_dwordx2
; GCN-NOT: load_dword

; GCN: flat_store_dwordx2
define amdgpu_kernel void @select_ptr_crash_i64_flat(i32 %tmp, [8 x i32], i64* %ptr0, [8 x i32], i64* %ptr1, [8 x i32], i64 addrspace(1)* %ptr2) {
  %tmp2 = icmp eq i32 %tmp, 0
  %tmp3 = load i64, i64* %ptr0, align 8
  %tmp4 = load i64, i64* %ptr1, align 8
  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
  ret void
}

; The transform currently isn't performed for pointers outside addrspace(0),
; but it should be.
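; If the combine handled addrspace(1), it would take the same shape as the
; flat sketch above: select between the two addrspace(1) pointers, then do a
; single load from the selected pointer (this is the expectation, not
; something the checks below verify).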

; GCN-LABEL: {{^}}select_ptr_crash_i64_global:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
; GCN: v_cndmask_b32
; GCN: v_cndmask_b32
; GCN: flat_store_dwordx2
define amdgpu_kernel void @select_ptr_crash_i64_global(i32 %tmp, [8 x i32], i64 addrspace(1)* %ptr0, [8 x i32], i64 addrspace(1)* %ptr1, [8 x i32], i64 addrspace(1)* %ptr2) {
  %tmp2 = icmp eq i32 %tmp, 0
  %tmp3 = load i64, i64 addrspace(1)* %ptr0, align 8
  %tmp4 = load i64, i64 addrspace(1)* %ptr1, align 8
  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
  ret void
}

; GCN-LABEL: {{^}}select_ptr_crash_i64_local:
; GCN: ds_read_b64
; GCN: ds_read_b64
; GCN: v_cndmask_b32
; GCN: v_cndmask_b32
; GCN: flat_store_dwordx2
define amdgpu_kernel void @select_ptr_crash_i64_local(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) {
  %tmp2 = icmp eq i32 %tmp, 0
  %tmp3 = load i64, i64 addrspace(3)* %ptr0, align 8
  %tmp4 = load i64, i64 addrspace(3)* %ptr1, align 8
  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
  ret void
}

; The transform would break addressing mode matching here, so it's unclear
; whether doing it would be beneficial.
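
; As a sketch of the concern (hypothetical function name, not covered by any
; CHECK lines), selecting between the two GEP results leaves a single load
; whose base address is the select result, so the differing constant offsets
; could no longer be matched as ds_read_b64 immediates:
define amdgpu_kernel void @select_ptr_offsets_combined_sketch(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) {
  %cmp = icmp eq i32 %tmp, 0
  %gep0 = getelementptr inbounds i64, i64 addrspace(3)* %ptr0, i64 16
  %gep1 = getelementptr inbounds i64, i64 addrspace(3)* %ptr1, i64 64
  ; The 128-byte and 512-byte offsets are now hidden behind the select.
  %ptr = select i1 %cmp, i64 addrspace(3)* %gep0, i64 addrspace(3)* %gep1
  %val = load i64, i64 addrspace(3)* %ptr, align 8
  store i64 %val, i64 addrspace(1)* %ptr2, align 8
  ret void
}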

; GCN-LABEL: {{^}}select_ptr_crash_i64_local_offsets:
; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:128
; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:512
; GCN: v_cndmask_b32
; GCN: v_cndmask_b32
define amdgpu_kernel void @select_ptr_crash_i64_local_offsets(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) {
  %tmp2 = icmp eq i32 %tmp, 0
  %gep0 = getelementptr inbounds i64, i64 addrspace(3)* %ptr0, i64 16
  %gep1 = getelementptr inbounds i64, i64 addrspace(3)* %ptr1, i64 64
  %tmp3 = load i64, i64 addrspace(3)* %gep0, align 8
  %tmp4 = load i64, i64 addrspace(3)* %gep1, align 8
  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
  ret void
}