; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

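; tahiti runs under the SI prefix (no SDWA); fiji runs under the VI prefix and
; can use the SDWA forms of the conversion instructions checked below.
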
; GCN-LABEL: {{^}}fptoui_f16_to_i16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI:  v_cvt_u32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
; VI:  v_cvt_i32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_I16]]
; GCN: s_endpgm
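; Note that VI converts with the signed v_cvt_i32_f32 here; for any f16 value
; in u16 range the signed and unsigned conversions produce the same low 16
; bits, which is all that buffer_store_short keeps.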
define amdgpu_kernel void @fptoui_f16_to_i16(
    i16 addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = fptoui half %a.val to i16
  store i16 %r.val, i16 addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}fptoui_f16_to_i32
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; GCN: v_cvt_u32_f32_e32 v[[R_I32:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
define amdgpu_kernel void @fptoui_f16_to_i32(
    i32 addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = fptoui half %a.val to i32
  store i32 %r.val, i32 addrspace(1)* %r
  ret void
}

; Make sure f16 is promoted to f32 when converting f16 to i64; the existing
; f32 tests already cover the code generated for 'i64 = fp_to_uint f32'.
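; The high 32 bits of the i64 result are simply zeroed: the largest finite f16
; value (65504.0) already fits in the low dword.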

; GCN-LABEL: {{^}}fptoui_f16_to_i64
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_mov_b32_e32 v[[R_I64_High:[0-9]+]], 0
; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; GCN: v_cvt_u32_f32_e32 v[[R_I64_Low:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I64_Low]]{{\:}}[[R_I64_High]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @fptoui_f16_to_i64(
    i64 addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = fptoui half %a.val to i64
  store i64 %r.val, i64 addrspace(1)* %r
  ret void
}

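; For the packed <2 x half> source, SI has to unpack the high half with a
; shift, convert each half through f32, and repack the two i16 results with a
; shift/or pair, while VI can convert and repack in place using SDWA operands.
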
; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i16
; GCN:     buffer_load_dword v[[A_V2_F16:[0-9]+]]

; SI:     v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI:     v_cvt_u32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
; SI:     v_cvt_u32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
; SI:     v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
; SI:     v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_0]], v[[R_I16_HI]]

; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_cvt_f32_f16_sdwa v[[A_F32_0:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI:     v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
; VI:     v_cvt_i32_f32_sdwa v[[R_I16_0:[0-9]+]], v[[A_F32_0]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI:     v_or_b32_sdwa v[[R_V2_I16:[0-9]+]], v[[R_I16_1]], v[[R_I16_0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD

; GCN:     buffer_store_dword v[[R_V2_I16]]
; GCN:     s_endpgm

define amdgpu_kernel void @fptoui_v2f16_to_v2i16(
    <2 x i16> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = fptoui <2 x half> %a.val to <2 x i16>
  store <2 x i16> %r.val, <2 x i16> addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i32
; GCN: buffer_load_dword
; GCN: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; VI: v_cvt_f32_f16_sdwa
; GCN: v_cvt_u32_f32_e32
; GCN: v_cvt_u32_f32_e32
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
define amdgpu_kernel void @fptoui_v2f16_to_v2i32(
    <2 x i32> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = fptoui <2 x half> %a.val to <2 x i32>
  store <2 x i32> %r.val, <2 x i32> addrspace(1)* %r
  ret void
}

; Make sure f16 is promoted to f32 when converting f16 to i64; the existing
; f32 tests already cover the code generated for 'i64 = fp_to_uint f32'.
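; As in the scalar i64 case, the high dword of each element is just a zero
; immediate.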

; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i64
; GCN: buffer_load_dword v[[A_F16_0:[0-9]+]]
; GCN: v_mov_b32_e32 v[[R_I64_1_High:[0-9]+]], 0
; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_F16_0]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_F16_0]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_u32_f32_e32 v[[R_I64_0_Low:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_u32_f32_e32 v[[R_I64_1_Low:[0-9]+]], v[[A_F32_1]]
; VI: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_F16_0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_F16_0]]
; VI: v_cvt_u32_f32_e32 v[[R_I64_1_Low:[0-9]+]], v[[A_F32_1]]
; VI: v_cvt_u32_f32_e32 v[[R_I64_0_Low:[0-9]+]], v[[A_F32_0]]
; GCN: v_mov_b32_e32 v[[R_I64_0_High:[0-9]+]], 0
; GCN: buffer_store_dwordx4 v{{\[}}[[R_I64_0_Low]]{{\:}}[[R_I64_1_High]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @fptoui_v2f16_to_v2i64(
    <2 x i64> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = fptoui <2 x half> %a.val to <2 x i64>
  store <2 x i64> %r.val, <2 x i64> addrspace(1)* %r
  ret void
}
