; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

; GCN-LABEL: {{^}}fptosi_f16_to_i16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; GCN: v_cvt_i32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_I16]]
; GCN: s_endpgm
define amdgpu_kernel void @fptosi_f16_to_i16(
    i16 addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = fptosi half %a.val to i16
  store i16 %r.val, i16 addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}fptosi_f16_to_i32
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; GCN: v_cvt_i32_f32_e32 v[[R_I32:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
define amdgpu_kernel void @fptosi_f16_to_i32(
    i32 addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = fptosi half %a.val to i32
  store i32 %r.val, i32 addrspace(1)* %r
  ret void
}

; Make sure f16 is promoted to f32 when converting f16 to i64. An existing
; test covers the code generated for 'i64 = fp_to_sint f32'.

; GCN-LABEL: {{^}}fptosi_f16_to_i64
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; GCN: v_cvt_i32_f32_e32 v[[R_I64_Low:[0-9]+]], v[[A_F32]]
; GCN: v_ashrrev_i32_e32 v[[R_I64_High:[0-9]+]], 31, v[[R_I64_Low]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I64_Low]]{{\:}}[[R_I64_High]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @fptosi_f16_to_i64(
    i64 addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = fptosi half %a.val to i64
  store i64 %r.val, i64 addrspace(1)* %r
  ret void
}

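; The two i16 results are packed into a single dword. SI extracts the high
; half with a shift and repacks the results with and/shl/or, while VI uses
; SDWA for the high-half conversions and for the final v_or_b32.
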
; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]

; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
; SI-DAG: v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
; SI-DAG: v_and_b32_e32 v[[R_I16_LO:[0-9]+]], 0xffff, v[[R_I16_0]]
; SI: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_LO]], v[[R_I16_HI]]

; VI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; VI: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
; VI: v_cvt_i32_f32_sdwa v[[R_I16_1:[0-9]+]], v[[A_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI: v_or_b32_sdwa v[[R_V2_I16:[0-9]+]], v[[R_I16_0]], v[[R_I16_1]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD

; GCN: buffer_store_dword v[[R_V2_I16]]
; GCN: s_endpgm

define amdgpu_kernel void @fptosi_v2f16_to_v2i16(
    <2 x i16> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = fptosi <2 x half> %a.val to <2 x i16>
  store <2 x i16> %r.val, <2 x i16> addrspace(1)* %r
  ret void
}

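; No packing is needed for the i32 results; both converted lanes are stored
; with a single buffer_store_dwordx2.
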
; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i32
; GCN: buffer_load_dword
; GCN: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; VI: v_cvt_f32_f16_sdwa
; GCN: v_cvt_i32_f32_e32
; GCN: v_cvt_i32_f32_e32
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
define amdgpu_kernel void @fptosi_v2f16_to_v2i32(
    <2 x i32> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = fptosi <2 x half> %a.val to <2 x i32>
  store <2 x i32> %r.val, <2 x i32> addrspace(1)* %r
  ret void
}

; Make sure f16 is promoted to f32 when converting f16 to i64. An existing
; test covers the code generated for 'i64 = fp_to_sint f32'.

; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i64
; GCN: buffer_load_dword v[[A_F16_0:[0-9]+]]
; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_F16_0]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_F16_0]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_i32_f32_e32 v[[R_I64_0_Low:[0-9]+]], v[[A_F32_0]]
; SI: v_ashrrev_i32_e32 v[[R_I64_0_High:[0-9]+]], 31, v[[R_I64_0_Low]]
; SI: v_cvt_i32_f32_e32 v[[R_I64_1_Low:[0-9]+]], v[[A_F32_1]]
; SI: v_ashrrev_i32_e32 v[[R_I64_1_High:[0-9]+]], 31, v[[R_I64_1_Low]]
; VI: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_F16_0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_F16_0]]
; VI: v_cvt_i32_f32_e32 v[[R_I64_1_Low:[0-9]+]], v[[A_F32_1]]
; VI: v_cvt_i32_f32_e32 v[[R_I64_0_Low:[0-9]+]], v[[A_F32_0]]
; VI: v_ashrrev_i32_e32 v[[R_I64_1_High:[0-9]+]], 31, v[[R_I64_1_Low]]
; VI: v_ashrrev_i32_e32 v[[R_I64_0_High:[0-9]+]], 31, v[[R_I64_0_Low]]
; GCN: buffer_store_dwordx4 v{{\[}}[[R_I64_0_Low]]{{\:}}[[R_I64_1_High]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @fptosi_v2f16_to_v2i64(
    <2 x i64> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = fptosi <2 x half> %a.val to <2 x i64>
  store <2 x i64> %r.val, <2 x i64> addrspace(1)* %r
  ret void
}