1; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
2; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
3
4declare float @llvm.fabs.f32(float) #1
5declare double @llvm.fabs.f64(double) #1
6
; isinf(x) idiom: |x| compared ordered-equal against +infinity.
; This must select to a single v_cmp_class_f32 against class mask 0x204
; (the -inf and +inf class bits) rather than a floating-point compare.
; GCN-LABEL: {{^}}test_isinf_pattern:
; GCN: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x204{{$}}
; GCN: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
; GCN-NOT: v_cmp
; GCN: s_endpgm
define amdgpu_kernel void @test_isinf_pattern(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %fabs = tail call float @llvm.fabs.f32(float %x) #1 ; |x|
  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000 ; |x| == +inf (ordered)
  %ext = zext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
19
; Negative test: ueq (unordered-or-equal) is also true for NaN input, so
; this is not the pure isinf idiom and must NOT fold to v_cmp_class.
; GCN-LABEL: {{^}}test_not_isinf_pattern_0:
; GCN-NOT: v_cmp_class
; GCN: s_endpgm
define amdgpu_kernel void @test_not_isinf_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %fabs = tail call float @llvm.fabs.f32(float %x) #1
  %cmp = fcmp ueq float %fabs, 0x7FF0000000000000 ; unordered predicate breaks the pattern
  %ext = zext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
30
; Negative test: the constant is -infinity, but the compared value has been
; run through fabs, so this does not match the isinf idiom and must NOT
; fold to v_cmp_class.
; GCN-LABEL: {{^}}test_not_isinf_pattern_1:
; GCN-NOT: v_cmp_class
; GCN: s_endpgm
define amdgpu_kernel void @test_not_isinf_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %fabs = tail call float @llvm.fabs.f32(float %x) #1
  %cmp = fcmp oeq float %fabs, 0xFFF0000000000000 ; -inf, not +inf
  %ext = zext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
41
; isfinite(x) idiom: ord(x, 0) && |x| != +inf. Should collapse into one
; v_cmp_class_f32 with the "finite" class mask 0x1f8 (the ±zero,
; ±subnormal, and ±normal class bits), with no separate FP compare.
; GCN-LABEL: {{^}}test_isfinite_pattern_0:
; GCN-NOT: v_cmp
; GCN: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1f8{{$}}
; GCN: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
; GCN-NOT: v_cmp
; GCN: s_endpgm
define amdgpu_kernel void @test_isfinite_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %ord = fcmp ord float %x, 0.000000e+00 ; not NaN
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000 ; not infinity
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
57
; Single-compare isfinite variant: fcmp one (ordered-not-equal) of |x|
; against +inf is false for NaN by itself, so it already expresses
; isfinite without a separate ord check. On SI this should still become a
; single v_cmp_class_f32 with the "finite" mask 0x1f8.
; SI-LABEL: {{^}}test_isfinite_pattern_1:
; SI-NOT: v_cmp
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1f8{{$}}
; SI: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
; SI-NOT: v_cmp
; SI: s_endpgm
define amdgpu_kernel void @test_isfinite_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
  ; Was "#3": that attribute group is never defined in this file (only #0
  ; and #1 are), which the IR parser rejects as a dangling reference.
  ; Every other fabs call site here uses #1 (nounwind readnone).
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %cmpinf = fcmp one float %x.fabs, 0x7FF0000000000000 ; |x| != inf, false on NaN
  %ext = zext i1 %cmpinf to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
71
; Negative test: uses negative infinity. |x| can never equal -inf, so this
; is not the isfinite idiom and must NOT fold to v_cmp_class_f32.
; GCN-LABEL: {{^}}test_isfinite_not_pattern_0:
; GCN-NOT: v_cmp_class_f32
; GCN: s_endpgm
define amdgpu_kernel void @test_isfinite_not_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp une float %x.fabs, 0xFFF0000000000000 ; -inf, not +inf
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
85
; Negative test: no fabs, so the inf compare misses -inf and this is not
; isfinite; must NOT fold to v_cmp_class_f32.
; GCN-LABEL: {{^}}test_isfinite_not_pattern_1:
; GCN-NOT: v_cmp_class_f32
; GCN: s_endpgm
define amdgpu_kernel void @test_isfinite_not_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %ord = fcmp ord float %x, 0.000000e+00
  %ninf = fcmp une float %x, 0x7FF0000000000000 ; raw %x, not |x|
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
98
; Negative test: fabs is taken of a different value (%y) than the one the
; ord check inspects (%x), so the two halves do not describe one value and
; the pattern must NOT fold to v_cmp_class_f32.
; GCN-LABEL: {{^}}test_isfinite_not_pattern_2:
; GCN-NOT: v_cmp_class_f32
; GCN: s_endpgm
define amdgpu_kernel void @test_isfinite_not_pattern_2(i32 addrspace(1)* nocapture %out, float %x, float %y) #0 {
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %y) #1 ; note: %y, not %x
  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
112
; Negative test: wrong ordered-compare predicate — uno (unordered, i.e.
; "is NaN") instead of ord, so this is not isfinite and must NOT fold to
; v_cmp_class_f32.
; GCN-LABEL: {{^}}test_isfinite_not_pattern_3:
; GCN-NOT: v_cmp_class_f32
; GCN: s_endpgm
define amdgpu_kernel void @test_isfinite_not_pattern_3(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %ord = fcmp uno float %x, 0.000000e+00 ; inverted sense of the ord check
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
126
; Same isfinite idiom as pattern_0 but with fcmp one instead of une for
; the inf check; should still fold to a single class compare against the
; finite mask 0x1f8, materialized into a 0/1 result via v_cndmask.
; GCN-LABEL: {{^}}test_isfinite_pattern_4:
; GCN-DAG: s_load_dword [[X:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8
; GCN-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]]
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
define amdgpu_kernel void @test_isfinite_pattern_4(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000 ; one instead of une
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
141
; Same as test_isfinite_pattern_4 but with the operands of the final `and`
; commuted; the fold must be commutation-insensitive and still produce the
; single class compare.
; GCN-LABEL: {{^}}test_isfinite_pattern_4_commute_and:
; GCN-DAG: s_load_dword [[X:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8
; GCN-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]]
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
define amdgpu_kernel void @test_isfinite_pattern_4_commute_and(i32 addrspace(1)* nocapture %out, float %x) #0 {
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ninf, %ord ; commuted operands vs. pattern_4
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
156
; Negative variant: the ord check compares %x against %y rather than a
; constant, so the NaN test on %y cannot be absorbed into the class
; compare on %x. Expect BOTH a v_cmp_o and a v_cmp_class, combined with
; s_and_b64. The [8 x i32] padding argument pushes %y to a separate
; kernel-argument offset (0x14 on SI, 0x50 on VI).
; GCN-LABEL: {{^}}test_not_isfinite_pattern_4_wrong_ord_test:
; GCN-DAG: s_load_dword [[X:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
; GCN-DAG: s_load_dword [[Y:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x14|0x50}}

; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8
; GCN-DAG: v_mov_b32_e32 [[VY:v[0-9]+]], [[Y]]

; SI and VI pick opposite carry/SGPR destinations for the two compares.
; SI-DAG: v_cmp_o_f32_e32 vcc, [[X]], [[VY]]
; SI-DAG: v_cmp_class_f32_e64 [[CLASS:s\[[0-9]+:[0-9]+\]]], [[X]], [[K]]
; SI: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], vcc, [[CLASS]]

; VI-DAG: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[X]], [[VY]]
; VI-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]]
; VI: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], [[CMP]], vcc

; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[AND]]
define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(i32 addrspace(1)* nocapture %out, float %x, [8 x i32], float %y) #0 {
  %ord = fcmp ord float %x, %y ; ord vs. %y, not vs. a constant
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
182
183attributes #0 = { nounwind }
184attributes #1 = { nounwind readnone }
185