; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; Natural mapping
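; Every operand already arrives in the register bank the selected MUBUF
; instruction expects (value and offsets in VGPRs, rsrc and soffset in SGPRs),
; so no cross-bank copies or waterfall loop should be required.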
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_BOTHEN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

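; The add of 4095 should fold into the instruction's immediate offset field;
; 4095 is the largest value the 12-bit MUBUF offset can encode.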
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_plus4095__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_plus4095__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_BOTHEN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 4095, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %voffset.add = add i32 %voffset, 4095
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
  ret void
}

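; A constant 4095 voffset needs no VGPR at all: it should be selected as the
; immediate offset of the IDXEN form, leaving vindex as the only address VGPR.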
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__4095_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__4095_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_IDXEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 4095, i32 %soffset, i32 0)
  ret void
}

; Natural mapping, no voffset
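; With the voffset known to be zero, the cheaper IDXEN form (vindex only)
; should be selected instead of BOTHEN.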
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_IDXEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
  ret void
}

; All register operands need legalization
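; The SGPR value, vindex and voffset only need copies to VGPRs; the VGPR rsrc
; and soffset, however, require a waterfall loop: each iteration reads one
; lane's descriptor and soffset with V_READFIRSTLANE_B32, builds an exec mask
; of all lanes holding the same values, issues the atomic for those lanes, and
; repeats until every lane has executed.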
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset(float inreg %val, <4 x i32> %rsrc, i32 inreg %vindex, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK:   [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
  ; CHECK:   [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY11]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY12]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_BOTHEN [[COPY8]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

; All register operands need legalization, no voffset
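; Same waterfall sequence as above, but with a zero voffset the IDXEN form is
; expected inside the loop instead of BOTHEN.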
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__0_voffset__vgpr_soffset(float inreg %val, <4 x i32> %rsrc, i32 inreg %vindex, i32 %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__0_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY9]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY10]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_IDXEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
  ret void
}

; Natural mapping + slc
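; The i32 2 cachepolicy argument requests slc, which should show up as the
; trailing '1' immediate on the selected instruction.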
define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_BOTHEN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
  ret void
}

define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset_slc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_IDXEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 2)
  ret void
}

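; The packed <2 x half> variant should select the BUFFER_ATOMIC_PK_ADD_F16
; opcodes; operand handling otherwise mirrors the f32 cases above.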
define amdgpu_ps void @struct_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_ATOMIC_PK_ADD_F16_BOTHEN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call <2 x half> @llvm.amdgcn.struct.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps void @struct_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_PK_ADD_F16_IDXEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call <2 x half> @llvm.amdgcn.struct.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
  ret void
}

declare float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float, <4 x i32>, i32, i32, i32, i32 immarg) #0
declare <2 x half> @llvm.amdgcn.struct.buffer.atomic.fadd.v2f16(<2 x half>, <4 x i32>, i32, i32, i32, i32 immarg) #0

attributes #0 = { nounwind }