; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s

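; 16-bit address (a16) variants of the image load/store mip intrinsics.
; GFX9 and GFX10 legalize these identically: each i16 coordinate arrives in a
; 32-bit VGPR and is repacked into a <2 x s16> register with
; G_BUILD_VECTOR_TRUNC, padding any unused lane with G_IMPLICIT_DEF.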
define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i16 %s) {
  ; GFX9-LABEL: name: load_mip_1d
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[DEF]](s32)
  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10-LABEL: name: load_mip_1d
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[DEF]](s32)
  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 15, i16 %s, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

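; With two coordinates, %s and %t fill both lanes of a single <2 x s16> pack
; and the leftover address operand slot is represented as $noreg.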
define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i16 %s, i16 %t) {
  ; GFX9-LABEL: name: load_mip_2d
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10-LABEL: name: load_mip_2d
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

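; Three coordinates need two <2 x s16> packs (the second padded with undef),
; concatenated into a single <4 x s16> address operand.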
define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
  ; GFX9-LABEL: name: load_mip_3d
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10-LABEL: name: load_mip_3d
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

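; The array slice behaves like a second coordinate: %s and %t share one
; <2 x s16> pack, as in the 2d case.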
define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i16 %s, i16 %t) {
  ; GFX9-LABEL: name: load_mip_1darray
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10-LABEL: name: load_mip_1darray
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

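; Two coordinates plus a slice index legalize like 3d: a padded <4 x s16>
; built from two G_BUILD_VECTOR_TRUNCs.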
define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
  ; GFX9-LABEL: name: load_mip_2darray
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10-LABEL: name: load_mip_2darray
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

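; Cube loads take a face index as the third address operand and legalize the
; same way as the 3d and 2darray cases.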
define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
  ; GFX9-LABEL: name: load_mip_cube
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10-LABEL: name: load_mip_cube
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

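; The store variants repack their 16-bit address operands the same way as the
; loads above; the <4 x s32> data operand is passed through unchanged.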
define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s) {
  ; GFX9-LABEL: name: store_mip_1d
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX9:   S_ENDPGM 0
  ; GFX10-LABEL: name: store_mip_1d
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX10:   S_ENDPGM 0
main_body:
  call void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

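; Two coordinates, one full <2 x s16> pack, as in load_mip_2d.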
define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t) {
  ; GFX9-LABEL: name: store_mip_2d
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX9:   S_ENDPGM 0
  ; GFX10-LABEL: name: store_mip_2d
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX10:   S_ENDPGM 0
main_body:
  call void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

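; Three coordinates in a padded <4 x s16>, as in load_mip_3d.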
define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
  ; GFX9-LABEL: name: store_mip_3d
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX9:   S_ENDPGM 0
  ; GFX10-LABEL: name: store_mip_3d
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX10:   S_ENDPGM 0
main_body:
  call void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

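; Coordinate plus array slice in a single <2 x s16> pack.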
define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t) {
  ; GFX9-LABEL: name: store_mip_1darray
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX9:   S_ENDPGM 0
  ; GFX10-LABEL: name: store_mip_1darray
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX10:   S_ENDPGM 0
main_body:
  call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

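; Coordinates plus slice in a padded <4 x s16>, as in store_mip_3d.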
define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
  ; GFX9-LABEL: name: store_mip_2darray
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX9:   S_ENDPGM 0
  ; GFX10-LABEL: name: store_mip_2darray
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX10:   S_ENDPGM 0
main_body:
  call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

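; Face index as the third address operand; same padded <4 x s16> packing as
; the other three-coordinate stores.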
define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
  ; GFX9-LABEL: name: store_mip_cube
  ; GFX9: bb.1.main_body:
  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX9:   S_ENDPGM 0
  ; GFX10-LABEL: name: store_mip_cube
  ; GFX10: bb.1.main_body:
  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8")
  ; GFX10:   S_ENDPGM 0
main_body:
  call void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1

attributes #0 = { nounwind readonly }
attributes #1 = { nounwind writeonly }