; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math  -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
4
; FIXME: Should replace unsafe-fp-math with no signed zeros.
6
7declare i32 @llvm.r600.read.tidig.x() #1
8
; The two inputs to the instruction are different SGPRs from the same
; super register, so we can't fold both SGPR operands even though they
; are both the same register.
12
; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32:
; EG: MIN *
; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
   %r0 = extractelement <4 x float> %reg0, i32 0
   %r1 = extractelement <4 x float> %reg0, i32 1
   %r2 = fcmp uge float %r0, %r1
   %r3 = select i1 %r2, float %r1, float %r0
   %vec = insertelement <4 x float> undef, float %r3, i32 0
   store <4 x float> %vec, <4 x float> addrspace(1)* %out, align 16
   ret void
}
26
; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32:
; SI-DAG: s_load_dwordx2 s{{\[}}[[A:[0-9]+]]:[[B:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb

; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], s[[B]]

; SI-SAFE: v_min_legacy_f32_e64 {{v[0-9]+}}, [[VB]], s[[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, s[[A]], [[VB]]

define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out, float %a, float %b) #0 {
  %cmp = fcmp ule float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}
41
; FUNC-LABEL: @test_fmin_legacy_ule_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ule float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}
60
; FUNC-LABEL: @test_fmin_legacy_ole_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ole float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}
79
; FUNC-LABEL: @test_fmin_legacy_olt_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp olt float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}
98
; FUNC-LABEL: @test_fmin_legacy_ult_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ult float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}
117
; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32:
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr <1 x float>, <1 x float> addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr <1 x float>, <1 x float> addrspace(1)* %gep.0, i32 1

  %a = load <1 x float>, <1 x float> addrspace(1)* %gep.0
  %b = load <1 x float>, <1 x float> addrspace(1)* %gep.1

  %cmp = fcmp ult <1 x float> %a, %b
  %val = select <1 x i1> %cmp, <1 x float> %a, <1 x float> %b
  store <1 x float> %val, <1 x float> addrspace(1)* %out
  ret void
}
136
; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32:
; SI: buffer_load_dwordx2
; SI: buffer_load_dwordx2
; SI-SAFE: v_min_legacy_f32_e32
; SI-SAFE: v_min_legacy_f32_e32

; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr <2 x float>, <2 x float> addrspace(1)* %gep.0, i32 1

  %a = load <2 x float>, <2 x float> addrspace(1)* %gep.0
  %b = load <2 x float>, <2 x float> addrspace(1)* %gep.1

  %cmp = fcmp ult <2 x float> %a, %b
  %val = select <2 x i1> %cmp, <2 x float> %a, <2 x float> %b
  store <2 x float> %val, <2 x float> addrspace(1)* %out
  ret void
}
158
; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32:
; SI-SAFE: v_min_legacy_f32_e32
; SI-SAFE: v_min_legacy_f32_e32
; SI-SAFE: v_min_legacy_f32_e32

; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr <3 x float>, <3 x float> addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr <3 x float>, <3 x float> addrspace(1)* %gep.0, i32 1

  %a = load <3 x float>, <3 x float> addrspace(1)* %gep.0
  %b = load <3 x float>, <3 x float> addrspace(1)* %gep.1

  %cmp = fcmp ult <3 x float> %a, %b
  %val = select <3 x i1> %cmp, <3 x float> %a, <3 x float> %b
  store <3 x float> %val, <3 x float> addrspace(1)* %out
  ret void
}
180
; The compare result has a second user, so the cmp+select pair must not be
; folded into a single min instruction.
; FUNC-LABEL: @test_fmin_legacy_ole_f32_multi_use
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-NOT: v_min
; SI: v_cmp_le_f32
; SI-NEXT: v_cndmask_b32
; SI-NOT: v_min
; SI: s_endpgm
define amdgpu_kernel void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ole float %a, %b
  %val0 = select i1 %cmp, float %a, float %b
  store float %val0, float addrspace(1)* %out0, align 4
  store i1 %cmp, i1 addrspace(1)* %out1
  ret void
}
203
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
206