; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,VI %s

; half args should be promoted to float on subtargets without native f16 instructions (SI/CI).
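; For example, on SI an f16 add is lowered roughly like this (illustrative
; sketch only; the exact sequences are checked per function below):
;   v_cvt_f32_f16_e32 v0, v0   ; extend both operands to f32
;   v_cvt_f32_f16_e32 v1, v1
;   v_add_f32_e32     v0, v0, v1
;   v_cvt_f16_f32_e32 v0, v0   ; truncate the result back to f16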

; GCN-LABEL: {{^}}load_f16_arg:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: v_mov_b32_e32 [[V_ARG:v[0-9]+]], [[ARG]]
; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[V_ARG]]
define amdgpu_kernel void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
  store half %arg, half addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}load_v2f16_arg:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: v_mov_b32_e32 [[V_ARG:v[0-9]+]], [[ARG]]
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[V_ARG]]
define amdgpu_kernel void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
  store <2 x half> %arg, <2 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}load_v3f16_arg:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN-NOT: {{buffer|flat|global}}_load_

; GCN-NOT: {{flat|global}}_load
; GCN-DAG: {{flat|global}}_store_dword
; GCN-DAG: {{flat|global}}_store_short
; GCN-NOT: {{flat|global}}_store
; GCN: s_endpgm
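; (A <3 x half> store is 6 bytes, so it is legalized as a 4-byte dword store
; plus a 2-byte short store, matching the checks above.)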
define amdgpu_kernel void @load_v3f16_arg(<3 x half> addrspace(1)* %out, <3 x half> %arg) #0 {
  store <3 x half> %arg, <3 x half> addrspace(1)* %out
  ret void
}

; FIXME: Why not one load?
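; (The output pointer and the <4 x half> value occupy 16 contiguous bytes of
; kernarg space, so a single s_load_dwordx4 could presumably fetch both.)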
; GCN-LABEL: {{^}}load_v4f16_arg:
; GCN-DAG: s_load_dwordx2 s{{\[}}[[ARG0_LO:[0-9]+]]:[[ARG0_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0x2|0x8}}
; GCN-DAG: v_mov_b32_e32 v[[V_ARG0_LO:[0-9]+]], s[[ARG0_LO]]
; GCN-DAG: v_mov_b32_e32 v[[V_ARG0_HI:[0-9]+]], s[[ARG0_HI]]
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[V_ARG0_LO]]:[[V_ARG0_HI]]{{\]}}
define amdgpu_kernel void @load_v4f16_arg(<4 x half> addrspace(1)* %out, <4 x half> %arg) #0 {
  store <4 x half> %arg, <4 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}load_v8f16_arg:
define amdgpu_kernel void @load_v8f16_arg(<8 x half> addrspace(1)* %out, <8 x half> %arg) #0 {
  store <8 x half> %arg, <8 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v2f16_arg:
define amdgpu_kernel void @extload_v2f16_arg(<2 x float> addrspace(1)* %out, <2 x half> %in) #0 {
  %fpext = fpext <2 x half> %in to <2 x float>
  store <2 x float> %fpext, <2 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_f16_to_f32_arg:
define amdgpu_kernel void @extload_f16_to_f32_arg(float addrspace(1)* %out, half %arg) #0 {
  %ext = fpext half %arg to float
  store float %ext, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v2f16_to_v2f32_arg:
define amdgpu_kernel void @extload_v2f16_to_v2f32_arg(<2 x float> addrspace(1)* %out, <2 x half> %arg) #0 {
  %ext = fpext <2 x half> %arg to <2 x float>
  store <2 x float> %ext, <2 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v3f16_to_v3f32_arg:
; GCN: s_load_dwordx2 s
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: s_load_dwordx2 s
; GCN-NOT: _load
; GCN: v_cvt_f32_f16_e32
; GCN-NOT: v_cvt_f32_f16
; GCN-DAG: _store_dwordx3
; GCN: s_endpgm
define amdgpu_kernel void @extload_v3f16_to_v3f32_arg(<3 x float> addrspace(1)* %out, <3 x half> %arg) #0 {
  %ext = fpext <3 x half> %arg to <3 x float>
  store <3 x float> %ext, <3 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v4f16_to_v4f32_arg:
define amdgpu_kernel void @extload_v4f16_to_v4f32_arg(<4 x float> addrspace(1)* %out, <4 x half> %arg) #0 {
  %ext = fpext <4 x half> %arg to <4 x float>
  store <4 x float> %ext, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v8f16_to_v8f32_arg:
; GCN: s_load_dwordx4

; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32

; GCN: flat_store_dwordx4
; GCN: flat_store_dwordx4
define amdgpu_kernel void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x half> %arg) #0 {
  %ext = fpext <8 x half> %arg to <8 x float>
  store <8 x float> %ext, <8 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_f16_to_f64_arg:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[ARG_F32:[0-9]+]], [[ARG]]
; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], v[[ARG_F32]]
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
  %ext = fpext half %arg to double
  store double %ext, double addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v2f16_to_v2f64_arg:
; GCN-DAG: s_load_dword s
; GCN: s_lshr_b32
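; (Both f16 elements arrive packed in a single SGPR, so the high element is
; extracted with s_lshr_b32 before conversion.)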

; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN: s_endpgm
define amdgpu_kernel void @extload_v2f16_to_v2f64_arg(<2 x double> addrspace(1)* %out, <2 x half> %arg) #0 {
  %ext = fpext <2 x half> %arg to <2 x double>
  store <2 x double> %ext, <2 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v3f16_to_v3f64_arg:
; GCN: s_load_dwordx2 s
; GCN: s_load_dwordx2 s
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN: s_endpgm
define amdgpu_kernel void @extload_v3f16_to_v3f64_arg(<3 x double> addrspace(1)* %out, <3 x half> %arg) #0 {
  %ext = fpext <3 x half> %arg to <3 x double>
  store <3 x double> %ext, <3 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v4f16_to_v4f64_arg:
; GCN: s_load_dwordx2 s
; GCN: s_load_dwordx2 s

; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN: s_endpgm
define amdgpu_kernel void @extload_v4f16_to_v4f64_arg(<4 x double> addrspace(1)* %out, <4 x half> %arg) #0 {
  %ext = fpext <4 x half> %arg to <4 x double>
  store <4 x double> %ext, <4 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extload_v8f16_to_v8f64_arg:
; GCN: s_load_dwordx2 s
; GCN: s_load_dwordx4 s

; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32

; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32
; GCN-DAG: v_cvt_f32_f16_e32

; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32

; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32

; GCN: s_endpgm
define amdgpu_kernel void @extload_v8f16_to_v8f64_arg(<8 x double> addrspace(1)* %out, <8 x half> %arg) #0 {
  %ext = fpext <8 x half> %arg to <8 x double>
  store <8 x double> %ext, <8 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_load_store_f16:
; GCN: flat_load_ushort [[TMP:v[0-9]+]]
; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[TMP]]
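; (On these amdhsa targets, global address space accesses are selected to
; flat instructions, hence the flat_load/flat_store checks throughout.)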
define amdgpu_kernel void @global_load_store_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
  %val = load half, half addrspace(1)* %in
  store half %val, half addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_load_store_v2f16:
; GCN: flat_load_dword [[TMP:v[0-9]+]]
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[TMP]]
define amdgpu_kernel void @global_load_store_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
  %val = load <2 x half>, <2 x half> addrspace(1)* %in
  store <2 x half> %val, <2 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_load_store_v4f16:
; GCN: flat_load_dwordx2 [[TMP:v\[[0-9]+:[0-9]+\]]]
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[TMP]]
define amdgpu_kernel void @global_load_store_v4f16(<4 x half> addrspace(1)* %in, <4 x half> addrspace(1)* %out) #0 {
  %val = load <4 x half>, <4 x half> addrspace(1)* %in
  store <4 x half> %val, <4 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_load_store_v8f16:
; GCN: flat_load_dwordx4 [[TMP:v\[[0-9]+:[0-9]+\]]]
; GCN: flat_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, [[TMP]]
; GCN: s_endpgm
define amdgpu_kernel void @global_load_store_v8f16(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
  %val = load <8 x half>, <8 x half> addrspace(1)* %in
  store <8 x half> %val, <8 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_f16_to_f32:
; GCN: flat_load_ushort [[LOAD:v[0-9]+]]
; GCN: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[LOAD]]
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[CVT]]
define amdgpu_kernel void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %in) #0 {
  %val = load half, half addrspace(1)* %in
  %cvt = fpext half %val to float
  store float %cvt, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f32:
; GCN: flat_load_dword [[LOAD:v[0-9]+]],

; SI-DAG: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
; SI-DAG: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]

; SI: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]

; VI: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
; VI: v_cvt_f32_f16_sdwa v[[CVT1:[0-9]+]], [[LOAD]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
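; (The VI SDWA form reads the high 16-bit word directly via src0_sel:WORD_1,
; so the explicit v_lshrrev_b32 needed on SI is avoided.)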

; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[CVT0]]:[[CVT1]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @global_extload_v2f16_to_v2f32(<2 x float> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
  %val = load <2 x half>, <2 x half> addrspace(1)* %in
  %cvt = fpext <2 x half> %val to <2 x float>
  store <2 x float> %cvt, <2 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v3f16_to_v3f32:
define amdgpu_kernel void @global_extload_v3f16_to_v3f32(<3 x float> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
  %val = load <3 x half>, <3 x half> addrspace(1)* %in
  %cvt = fpext <3 x half> %val to <3 x float>
  store <3 x float> %cvt, <3 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v4f16_to_v4f32:
define amdgpu_kernel void @global_extload_v4f16_to_v4f32(<4 x float> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
  %val = load <4 x half>, <4 x half> addrspace(1)* %in
  %cvt = fpext <4 x half> %val to <4 x float>
  store <4 x float> %cvt, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v8f16_to_v8f32:
define amdgpu_kernel void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
  %val = load <8 x half>, <8 x half> addrspace(1)* %in
  %cvt = fpext <8 x half> %val to <8 x float>
  store <8 x float> %cvt, <8 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v16f16_to_v16f32:
; GCN: flat_load_dwordx4
; GCN: flat_load_dwordx4

; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32

; GCN: flat_store_dwordx4

; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32

; VI: v_cvt_f32_f16_e32
; VI: v_cvt_f32_f16_sdwa

; GCN: flat_store_dwordx4
; GCN: flat_store_dwordx4
; GCN: flat_store_dwordx4

; GCN: s_endpgm
define amdgpu_kernel void @global_extload_v16f16_to_v16f32(<16 x float> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
  %val = load <16 x half>, <16 x half> addrspace(1)* %in
  %cvt = fpext <16 x half> %val to <16 x float>
  store <16 x float> %cvt, <16 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_f16_to_f64:
; GCN: flat_load_ushort [[LOAD:v[0-9]+]]
; GCN: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[LOAD]]
; GCN: v_cvt_f64_f32_e32 [[CVT1:v\[[0-9]+:[0-9]+\]]], [[CVT0]]
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]
define amdgpu_kernel void @global_extload_f16_to_f64(double addrspace(1)* %out, half addrspace(1)* %in) #0 {
  %val = load half, half addrspace(1)* %in
  %cvt = fpext half %val to double
  store double %cvt, double addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f64:
; GCN-DAG: flat_load_dword [[LOAD:v[0-9]+]],

; SI-DAG: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
; SI-DAG: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
; SI-DAG: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]
; SI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT2_LO:[0-9]+]]:[[CVT2_HI:[0-9]+]]{{\]}}, v[[CVT0]]
; SI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT3_LO:[0-9]+]]:[[CVT3_HI:[0-9]+]]{{\]}}, v[[CVT1]]

; VI-DAG: v_cvt_f32_f16_sdwa v[[CVT0:[0-9]+]], [[LOAD]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-DAG: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[LOAD]]
; VI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT3_LO:[0-9]+]]:[[CVT3_HI:[0-9]+]]{{\]}}, v[[CVT0]]
; VI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT2_LO:[0-9]+]]:[[CVT2_HI:[0-9]+]]{{\]}}, v[[CVT1]]

; GCN-DAG: flat_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[CVT2_LO]]:[[CVT3_HI]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
  %val = load <2 x half>, <2 x half> addrspace(1)* %in
  %cvt = fpext <2 x half> %val to <2 x double>
  store <2 x double> %cvt, <2 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v3f16_to_v3f64:

; XSI: flat_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
; XSI: v_cvt_f32_f16_e32
; XSI: v_cvt_f32_f16_e32
; XSI-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
; XSI: v_cvt_f32_f16_e32
; XSI-NOT: v_cvt_f32_f16

; XVI: flat_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
; XVI: v_cvt_f32_f16_e32
; XVI: v_cvt_f32_f16_e32
; XVI: v_cvt_f32_f16_sdwa
; XVI-NOT: v_cvt_f32_f16

; GCN: flat_load_dwordx2 v{{\[}}[[IN_LO:[0-9]+]]:[[IN_HI:[0-9]+]]
; GCN: v_cvt_f32_f16_e32
; GCN: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; VI: v_cvt_f32_f16_sdwa
; GCN-NOT: v_cvt_f32_f16

; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN: v_cvt_f64_f32_e32
; GCN-NOT: v_cvt_f64_f32_e32

; GCN-DAG: flat_store_dwordx4
; GCN-DAG: flat_store_dwordx2
; GCN: s_endpgm
define amdgpu_kernel void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
  %val = load <3 x half>, <3 x half> addrspace(1)* %in
  %cvt = fpext <3 x half> %val to <3 x double>
  store <3 x double> %cvt, <3 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v4f16_to_v4f64:
define amdgpu_kernel void @global_extload_v4f16_to_v4f64(<4 x double> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
  %val = load <4 x half>, <4 x half> addrspace(1)* %in
  %cvt = fpext <4 x half> %val to <4 x double>
  store <4 x double> %cvt, <4 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v8f16_to_v8f64:
define amdgpu_kernel void @global_extload_v8f16_to_v8f64(<8 x double> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
  %val = load <8 x half>, <8 x half> addrspace(1)* %in
  %cvt = fpext <8 x half> %val to <8 x double>
  store <8 x double> %cvt, <8 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_extload_v16f16_to_v16f64:
define amdgpu_kernel void @global_extload_v16f16_to_v16f64(<16 x double> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
  %val = load <16 x half>, <16 x half> addrspace(1)* %in
  %cvt = fpext <16 x half> %val to <16 x double>
  store <16 x double> %cvt, <16 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_truncstore_f32_to_f16:
; GCN: flat_load_dword [[LOAD:v[0-9]+]]
; GCN: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[LOAD]]
; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT]]
define amdgpu_kernel void @global_truncstore_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %val = load float, float addrspace(1)* %in
  %cvt = fptrunc float %val to half
  store half %cvt, half addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_truncstore_v2f32_to_v2f16:
; GCN: flat_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN-DAG: v_cvt_f16_f32_e32 [[CVT0:v[0-9]+]], v[[LO]]

; SI-DAG: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], v[[HI]]
; SI-DAG: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[CVT1]]
; SI:     v_or_b32_e32 [[PACKED:v[0-9]+]], [[CVT0]], [[SHL]]

; VI-DAG: v_cvt_f16_f32_sdwa [[CVT1:v[0-9]+]], v[[HI]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI:     v_or_b32_e32 [[PACKED:v[0-9]+]], [[CVT0]], [[CVT1]]
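; (On VI the SDWA conversion writes straight into the high word of its
; destination via dst_sel:WORD_1, so the pack needs only a v_or_b32, without
; the explicit shift SI requires.)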

; GCN-DAG: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[PACKED]]
; GCN: s_endpgm
define amdgpu_kernel void @global_truncstore_v2f32_to_v2f16(<2 x half> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
  %val = load <2 x float>, <2 x float> addrspace(1)* %in
  %cvt = fptrunc <2 x float> %val to <2 x half>
  store <2 x half> %cvt, <2 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_truncstore_v3f32_to_v3f16:
; GCN: flat_load_dwordx3
; GCN-DAG: v_cvt_f16_f32_e32
; SI-DAG:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_sdwa
; GCN-DAG: v_cvt_f16_f32_e32
; GCN: flat_store_short
; GCN: flat_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @global_truncstore_v3f32_to_v3f16(<3 x half> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
  %val = load <3 x float>, <3 x float> addrspace(1)* %in
  %cvt = fptrunc <3 x float> %val to <3 x half>
  store <3 x half> %cvt, <3 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_truncstore_v4f32_to_v4f16:
; GCN: flat_load_dwordx4
; GCN-DAG: v_cvt_f16_f32_e32
; SI-DAG:  v_cvt_f16_f32_e32
; SI-DAG:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_sdwa
; VI-DAG:  v_cvt_f16_f32_sdwa
; GCN-DAG: v_cvt_f16_f32_e32
; GCN: flat_store_dwordx2
; GCN: s_endpgm
define amdgpu_kernel void @global_truncstore_v4f32_to_v4f16(<4 x half> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
  %val = load <4 x float>, <4 x float> addrspace(1)* %in
  %cvt = fptrunc <4 x float> %val to <4 x half>
  store <4 x half> %cvt, <4 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_truncstore_v8f32_to_v8f16:
; GCN: flat_load_dwordx4
; GCN: flat_load_dwordx4
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; SI:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_e32
; VI-DAG:  v_cvt_f16_f32_sdwa
; VI-DAG:  v_cvt_f16_f32_sdwa
; VI-DAG:  v_cvt_f16_f32_sdwa
; VI-DAG:  v_cvt_f16_f32_sdwa
; GCN: flat_store_dwordx4
; GCN: s_endpgm
define amdgpu_kernel void @global_truncstore_v8f32_to_v8f16(<8 x half> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
  %val = load <8 x float>, <8 x float> addrspace(1)* %in
  %cvt = fptrunc <8 x float> %val to <8 x half>
  store <8 x half> %cvt, <8 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_truncstore_v16f32_to_v16f16:
; GCN: flat_load_dwordx4
; GCN: flat_load_dwordx4
; GCN: flat_load_dwordx4
; GCN: flat_load_dwordx4
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: v_cvt_f16_f32_e32
; GCN-DAG: flat_store_dwordx4
; GCN-DAG: flat_store_dwordx4
; GCN: s_endpgm
define amdgpu_kernel void @global_truncstore_v16f32_to_v16f16(<16 x half> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
  %val = load <16 x float>, <16 x float> addrspace(1)* %in
  %cvt = fptrunc <16 x float> %val to <16 x half>
  store <16 x half> %cvt, <16 x half> addrspace(1)* %out
  ret void
}

; FIXME: Unsafe math should fold conversions away
; GCN-LABEL: {{^}}fadd_f16:
; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
; SI: v_add_f32
; GCN: s_endpgm
define amdgpu_kernel void @fadd_f16(half addrspace(1)* %out, half %a, half %b) #0 {
  %add = fadd half %a, %b
  store half %add, half addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}fadd_v2f16:
; SI: v_add_f32
; SI: v_add_f32
; GCN: s_endpgm
define amdgpu_kernel void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %a, <2 x half> %b) #0 {
  %add = fadd <2 x half> %a, %b
  store <2 x half> %add, <2 x half> addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}fadd_v4f16:
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; GCN: s_endpgm
define amdgpu_kernel void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
  %b_ptr = getelementptr <4 x half>, <4 x half> addrspace(1)* %in, i32 1
  %a = load <4 x half>, <4 x half> addrspace(1)* %in, align 16
  %b = load <4 x half>, <4 x half> addrspace(1)* %b_ptr, align 16
  %result = fadd <4 x half> %a, %b
  store <4 x half> %result, <4 x half> addrspace(1)* %out, align 16
  ret void
}

; GCN-LABEL: {{^}}fadd_v8f16:
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; GCN: s_endpgm
define amdgpu_kernel void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half> %b) #0 {
  %add = fadd <8 x half> %a, %b
  store <8 x half> %add, <8 x half> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}test_bitcast_from_half:
; GCN: flat_load_ushort [[TMP:v[0-9]+]]
; GCN-NOT: [[TMP]]
; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[TMP]]
define amdgpu_kernel void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) #0 {
  %val = load half, half addrspace(1)* %in
  %val_int = bitcast half %val to i16
  store i16 %val_int, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_bitcast_to_half:
; GCN: flat_load_ushort [[TMP:v[0-9]+]]
; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[TMP]]
define amdgpu_kernel void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
  %val = load i16, i16 addrspace(1)* %in
  %val_fp = bitcast i16 %val to half
  store half %val_fp, half addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }